blob: 9ce2c0f254f3410a3b6373543698d9f6c6c5b09d [file] [log] [blame]
Stefan Roese1c60fe72014-11-07 12:37:49 +01001/*
2 * Copyright (C) 2012 Altera Corporation <www.altera.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * - Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * - Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * - Neither the name of the Altera Corporation nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#include <common.h>
Simon Glass0f2af882020-05-10 11:40:05 -060029#include <log.h>
Stefan Roese1c60fe72014-11-07 12:37:49 +010030#include <asm/io.h>
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +053031#include <dma.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060032#include <linux/bitops.h>
Simon Glassdbd79542020-05-10 11:40:11 -060033#include <linux/delay.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090034#include <linux/errno.h>
Marek Vasutdae51dd2016-04-27 23:18:55 +020035#include <wait_bit.h>
Vignesh R4ca60192016-07-06 10:20:56 +053036#include <spi.h>
Vignesh Raghavendra27516a32020-01-27 10:36:39 +053037#include <spi-mem.h>
Vignesh Rad4bd8a2018-01-24 10:44:07 +053038#include <malloc.h>
Stefan Roese1c60fe72014-11-07 12:37:49 +010039#include "cadence_qspi.h"
40
T Karthik Reddy3b49fbf2022-05-12 04:05:34 -060041__weak void cadence_qspi_apb_enable_linear_mode(bool enable)
42{
43 return;
44}
45
Stefan Roese1c60fe72014-11-07 12:37:49 +010046void cadence_qspi_apb_controller_enable(void *reg_base)
47{
48 unsigned int reg;
49 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy3a5ae122016-11-29 12:58:30 +000050 reg |= CQSPI_REG_CONFIG_ENABLE;
Stefan Roese1c60fe72014-11-07 12:37:49 +010051 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese1c60fe72014-11-07 12:37:49 +010052}
53
54void cadence_qspi_apb_controller_disable(void *reg_base)
55{
56 unsigned int reg;
57 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy3a5ae122016-11-29 12:58:30 +000058 reg &= ~CQSPI_REG_CONFIG_ENABLE;
Stefan Roese1c60fe72014-11-07 12:37:49 +010059 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese1c60fe72014-11-07 12:37:49 +010060}
61
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +053062void cadence_qspi_apb_dac_mode_enable(void *reg_base)
63{
64 unsigned int reg;
65
66 reg = readl(reg_base + CQSPI_REG_CONFIG);
67 reg |= CQSPI_REG_CONFIG_DIRECT;
68 writel(reg, reg_base + CQSPI_REG_CONFIG);
69}
70
Pratyush Yadave1814ad2021-06-26 00:47:09 +053071static unsigned int cadence_qspi_calc_dummy(const struct spi_mem_op *op,
72 bool dtr)
73{
74 unsigned int dummy_clk;
75
Marek Vasut545be192021-09-14 05:21:48 +020076 if (!op->dummy.nbytes || !op->dummy.buswidth)
77 return 0;
78
Pratyush Yadave1814ad2021-06-26 00:47:09 +053079 dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
80 if (dtr)
81 dummy_clk /= 2;
82
83 return dummy_clk;
84}
85
Ashok Reddy Somaf5817652022-08-24 05:38:47 -060086static u32 cadence_qspi_calc_rdreg(struct cadence_spi_priv *priv)
Pratyush Yadave1814ad2021-06-26 00:47:09 +053087{
88 u32 rdreg = 0;
89
Ashok Reddy Somaf5817652022-08-24 05:38:47 -060090 rdreg |= priv->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
91 rdreg |= priv->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
92 rdreg |= priv->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
Pratyush Yadave1814ad2021-06-26 00:47:09 +053093
94 return rdreg;
95}
96
97static int cadence_qspi_buswidth_to_inst_type(u8 buswidth)
98{
99 switch (buswidth) {
100 case 0:
101 case 1:
102 return CQSPI_INST_TYPE_SINGLE;
103
104 case 2:
105 return CQSPI_INST_TYPE_DUAL;
106
107 case 4:
108 return CQSPI_INST_TYPE_QUAD;
109
110 case 8:
111 return CQSPI_INST_TYPE_OCTAL;
112
113 default:
114 return -ENOTSUPP;
115 }
116}
117
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600118static int cadence_qspi_set_protocol(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530119 const struct spi_mem_op *op)
120{
121 int ret;
122
Apurva Nandanb88f55c2023-04-12 16:28:54 +0530123 /*
124 * For an op to be DTR, cmd phase along with every other non-empty
125 * phase should have dtr field set to 1. If an op phase has zero
126 * nbytes, ignore its dtr field; otherwise, check its dtr field.
127 * Also, dummy checks not performed here Since supports_op()
128 * already checks that all or none of the fields are DTR.
129 */
130 priv->dtr = op->cmd.dtr &&
131 (!op->addr.nbytes || op->addr.dtr) &&
132 (!op->data.nbytes || op->data.dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530133
134 ret = cadence_qspi_buswidth_to_inst_type(op->cmd.buswidth);
135 if (ret < 0)
136 return ret;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600137 priv->inst_width = ret;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530138
139 ret = cadence_qspi_buswidth_to_inst_type(op->addr.buswidth);
140 if (ret < 0)
141 return ret;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600142 priv->addr_width = ret;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530143
144 ret = cadence_qspi_buswidth_to_inst_type(op->data.buswidth);
145 if (ret < 0)
146 return ret;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600147 priv->data_width = ret;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530148
149 return 0;
150}
151
Stefan Roese1c60fe72014-11-07 12:37:49 +0100152/* Return 1 if idle, otherwise return 0 (busy). */
153static unsigned int cadence_qspi_wait_idle(void *reg_base)
154{
155 unsigned int start, count = 0;
156 /* timeout in unit of ms */
157 unsigned int timeout = 5000;
158
159 start = get_timer(0);
160 for ( ; get_timer(start) < timeout ; ) {
161 if (CQSPI_REG_IS_IDLE(reg_base))
162 count++;
163 else
164 count = 0;
165 /*
166 * Ensure the QSPI controller is in true idle state after
167 * reading back the same idle status consecutively
168 */
169 if (count >= CQSPI_POLL_IDLE_RETRY)
170 return 1;
171 }
172
173 /* Timeout, still in busy mode. */
174 printf("QSPI: QSPI is still busy after poll for %d times.\n",
175 CQSPI_REG_RETRY);
176 return 0;
177}
178
179void cadence_qspi_apb_readdata_capture(void *reg_base,
180 unsigned int bypass, unsigned int delay)
181{
182 unsigned int reg;
183 cadence_qspi_apb_controller_disable(reg_base);
184
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000185 reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100186
187 if (bypass)
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000188 reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100189 else
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000190 reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100191
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000192 reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
193 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100194
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000195 reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
196 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100197
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000198 writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100199
200 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100201}
202
203void cadence_qspi_apb_config_baudrate_div(void *reg_base,
204 unsigned int ref_clk_hz, unsigned int sclk_hz)
205{
206 unsigned int reg;
207 unsigned int div;
208
209 cadence_qspi_apb_controller_disable(reg_base);
210 reg = readl(reg_base + CQSPI_REG_CONFIG);
211 reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
212
Phil Edworthy8f24a442016-11-29 12:58:27 +0000213 /*
214 * The baud_div field in the config reg is 4 bits, and the ref clock is
215 * divided by 2 * (baud_div + 1). Round up the divider to ensure the
216 * SPI clock rate is less than or equal to the requested clock rate.
217 */
218 div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100219
Chin Liang See91b2c192016-08-07 22:50:40 +0800220 /* ensure the baud rate doesn't exceed the max value */
221 if (div > CQSPI_REG_CONFIG_BAUD_MASK)
222 div = CQSPI_REG_CONFIG_BAUD_MASK;
223
Phil Edworthy67824ad2016-11-29 12:58:28 +0000224 debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
225 ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));
226
Chin Liang See91b2c192016-08-07 22:50:40 +0800227 reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100228 writel(reg, reg_base + CQSPI_REG_CONFIG);
229
230 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100231}
232
Phil Edworthyeef2edc2016-11-29 12:58:31 +0000233void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100234{
235 unsigned int reg;
236
237 cadence_qspi_apb_controller_disable(reg_base);
238 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000239 reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100240
Phil Edworthyeef2edc2016-11-29 12:58:31 +0000241 if (mode & SPI_CPOL)
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000242 reg |= CQSPI_REG_CONFIG_CLK_POL;
Phil Edworthyeef2edc2016-11-29 12:58:31 +0000243 if (mode & SPI_CPHA)
Phil Edworthydd18c6f2016-11-29 12:58:29 +0000244 reg |= CQSPI_REG_CONFIG_CLK_PHA;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100245
246 writel(reg, reg_base + CQSPI_REG_CONFIG);
247
248 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100249}
250
251void cadence_qspi_apb_chipselect(void *reg_base,
252 unsigned int chip_select, unsigned int decoder_enable)
253{
254 unsigned int reg;
255
256 cadence_qspi_apb_controller_disable(reg_base);
257
258 debug("%s : chipselect %d decode %d\n", __func__, chip_select,
259 decoder_enable);
260
261 reg = readl(reg_base + CQSPI_REG_CONFIG);
262 /* docoder */
263 if (decoder_enable) {
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000264 reg |= CQSPI_REG_CONFIG_DECODE;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100265 } else {
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000266 reg &= ~CQSPI_REG_CONFIG_DECODE;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100267 /* Convert CS if without decoder.
268 * CS0 to 4b'1110
269 * CS1 to 4b'1101
270 * CS2 to 4b'1011
271 * CS3 to 4b'0111
272 */
273 chip_select = 0xF & ~(1 << chip_select);
274 }
275
276 reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
277 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
278 reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
279 << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
280 writel(reg, reg_base + CQSPI_REG_CONFIG);
281
282 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100283}
284
285void cadence_qspi_apb_delay(void *reg_base,
286 unsigned int ref_clk, unsigned int sclk_hz,
287 unsigned int tshsl_ns, unsigned int tsd2d_ns,
288 unsigned int tchsh_ns, unsigned int tslch_ns)
289{
290 unsigned int ref_clk_ns;
291 unsigned int sclk_ns;
292 unsigned int tshsl, tchsh, tslch, tsd2d;
293 unsigned int reg;
294
295 cadence_qspi_apb_controller_disable(reg_base);
296
297 /* Convert to ns. */
Phil Edworthy1fdd9232016-11-29 12:58:33 +0000298 ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100299
300 /* Convert to ns. */
Phil Edworthy1fdd9232016-11-29 12:58:33 +0000301 sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100302
Phil Edworthy1fdd9232016-11-29 12:58:33 +0000303 /* The controller adds additional delay to that programmed in the reg */
304 if (tshsl_ns >= sclk_ns + ref_clk_ns)
305 tshsl_ns -= sclk_ns + ref_clk_ns;
306 if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
307 tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
308 tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
309 tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
310 tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
311 tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100312
313 reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
314 << CQSPI_REG_DELAY_TSHSL_LSB);
315 reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
316 << CQSPI_REG_DELAY_TCHSH_LSB);
317 reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
318 << CQSPI_REG_DELAY_TSLCH_LSB);
319 reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
320 << CQSPI_REG_DELAY_TSD2D_LSB);
321 writel(reg, reg_base + CQSPI_REG_DELAY);
322
323 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100324}
325
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600326void cadence_qspi_apb_controller_init(struct cadence_spi_priv *priv)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100327{
328 unsigned reg;
329
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600330 cadence_qspi_apb_controller_disable(priv->regbase);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100331
332 /* Configure the device size and address bytes */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600333 reg = readl(priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100334 /* Clear the previous value */
335 reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
336 reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600337 reg |= (priv->page_size << CQSPI_REG_SIZE_PAGE_LSB);
338 reg |= (priv->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
339 writel(reg, priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100340
341 /* Configure the remap address register, no remap */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600342 writel(0, priv->regbase + CQSPI_REG_REMAP);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100343
Vikas Manocha215cea02015-07-02 18:29:43 -0700344 /* Indirect mode configurations */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600345 writel(priv->fifo_depth / 2, priv->regbase + CQSPI_REG_SRAMPARTITION);
Vikas Manocha215cea02015-07-02 18:29:43 -0700346
Stefan Roese1c60fe72014-11-07 12:37:49 +0100347 /* Disable all interrupts */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600348 writel(0, priv->regbase + CQSPI_REG_IRQMASK);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100349
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600350 cadence_qspi_apb_controller_enable(priv->regbase);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100351}
352
T Karthik Reddy73701e72022-05-12 04:05:32 -0600353int cadence_qspi_apb_exec_flash_cmd(void *reg_base, unsigned int reg)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100354{
355 unsigned int retry = CQSPI_REG_RETRY;
356
357 /* Write the CMDCTRL without start execution. */
358 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
359 /* Start execute */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000360 reg |= CQSPI_REG_CMDCTRL_EXECUTE;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100361 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
362
363 while (retry--) {
364 reg = readl(reg_base + CQSPI_REG_CMDCTRL);
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000365 if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100366 break;
367 udelay(1);
368 }
369
370 if (!retry) {
371 printf("QSPI: flash command execution timeout\n");
372 return -EIO;
373 }
374
375 /* Polling QSPI idle status. */
376 if (!cadence_qspi_wait_idle(reg_base))
377 return -EIO;
378
Dhruva Gole94fcaf02023-04-12 16:28:56 +0530379 /* Flush the CMDCTRL reg after the execution */
380 writel(0, reg_base + CQSPI_REG_CMDCTRL);
381
Stefan Roese1c60fe72014-11-07 12:37:49 +0100382 return 0;
383}
384
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600385static int cadence_qspi_setup_opcode_ext(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530386 const struct spi_mem_op *op,
387 unsigned int shift)
388{
389 unsigned int reg;
390 u8 ext;
391
392 if (op->cmd.nbytes != 2)
393 return -EINVAL;
394
395 /* Opcode extension is the LSB. */
396 ext = op->cmd.opcode & 0xff;
397
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600398 reg = readl(priv->regbase + CQSPI_REG_OP_EXT_LOWER);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530399 reg &= ~(0xff << shift);
400 reg |= ext << shift;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600401 writel(reg, priv->regbase + CQSPI_REG_OP_EXT_LOWER);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530402
403 return 0;
404}
405
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600406static int cadence_qspi_enable_dtr(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530407 const struct spi_mem_op *op,
408 unsigned int shift,
409 bool enable)
410{
411 unsigned int reg;
412 int ret;
413
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600414 reg = readl(priv->regbase + CQSPI_REG_CONFIG);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530415
416 if (enable) {
417 reg |= CQSPI_REG_CONFIG_DTR_PROTO;
418 reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
419
420 /* Set up command opcode extension. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600421 ret = cadence_qspi_setup_opcode_ext(priv, op, shift);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530422 if (ret)
423 return ret;
424 } else {
425 reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
426 reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
427 }
428
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600429 writel(reg, priv->regbase + CQSPI_REG_CONFIG);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530430
431 return 0;
432}
433
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600434int cadence_qspi_apb_command_read_setup(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530435 const struct spi_mem_op *op)
436{
437 int ret;
438 unsigned int reg;
439
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600440 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530441 if (ret)
442 return ret;
443
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600444 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
445 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530446 if (ret)
447 return ret;
448
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600449 reg = cadence_qspi_calc_rdreg(priv);
450 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530451
452 return 0;
453}
454
Stefan Roese1c60fe72014-11-07 12:37:49 +0100455/* For command RDID, RDSR. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600456int cadence_qspi_apb_command_read(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530457 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100458{
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600459 void *reg_base = priv->regbase;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100460 unsigned int reg;
461 unsigned int read_len;
462 int status;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530463 unsigned int rxlen = op->data.nbytes;
464 void *rxbuf = op->data.buf.in;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530465 unsigned int dummy_clk;
466 u8 opcode;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100467
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600468 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530469 opcode = op->cmd.opcode >> 8;
470 else
471 opcode = op->cmd.opcode;
472
473 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
474
475 /* Set up dummy cycles. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600476 dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530477 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
478 return -ENOTSUPP;
479
480 if (dummy_clk)
481 reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
482 << CQSPI_REG_CMDCTRL_DUMMY_LSB;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100483
484 reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
485
486 /* 0 means 1 byte. */
487 reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
488 << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
Dhruva Gole24d8de62023-01-03 12:01:11 +0530489
490 /* setup ADDR BIT field */
491 if (op->addr.nbytes) {
492 writel(op->addr.val, priv->regbase + CQSPI_REG_CMDADDRESS);
493 /*
494 * address bytes are zero indexed
495 */
496 reg |= (((op->addr.nbytes - 1) &
497 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
498 CQSPI_REG_CMDCTRL_ADD_BYTES_LSB);
499 reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
500 }
501
Stefan Roese1c60fe72014-11-07 12:37:49 +0100502 status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
503 if (status != 0)
504 return status;
505
506 reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
507
508 /* Put the read value into rx_buf */
509 read_len = (rxlen > 4) ? 4 : rxlen;
510 memcpy(rxbuf, &reg, read_len);
511 rxbuf += read_len;
512
513 if (rxlen > 4) {
514 reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
515
516 read_len = rxlen - read_len;
517 memcpy(rxbuf, &reg, read_len);
518 }
519 return 0;
520}
521
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600522int cadence_qspi_apb_command_write_setup(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530523 const struct spi_mem_op *op)
524{
525 int ret;
526 unsigned int reg;
527
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600528 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530529 if (ret)
530 return ret;
531
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600532 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
533 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530534 if (ret)
535 return ret;
536
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600537 reg = cadence_qspi_calc_rdreg(priv);
538 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530539
540 return 0;
541}
542
Stefan Roese1c60fe72014-11-07 12:37:49 +0100543/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600544int cadence_qspi_apb_command_write(struct cadence_spi_priv *priv,
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530545 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100546{
547 unsigned int reg = 0;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100548 unsigned int wr_data;
549 unsigned int wr_len;
Apurva Nandan52ff9b92023-04-12 16:28:55 +0530550 unsigned int dummy_clk;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530551 unsigned int txlen = op->data.nbytes;
552 const void *txbuf = op->data.buf.out;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600553 void *reg_base = priv->regbase;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530554 u8 opcode;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100555
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600556 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530557 opcode = op->cmd.opcode >> 8;
558 else
559 opcode = op->cmd.opcode;
560
561 reg |= opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530562
Apurva Nandan52ff9b92023-04-12 16:28:55 +0530563 /* setup ADDR BIT field */
564 if (op->addr.nbytes) {
565 writel(op->addr.val, priv->regbase + CQSPI_REG_CMDADDRESS);
566 /*
567 * address bytes are zero indexed
568 */
569 reg |= (((op->addr.nbytes - 1) &
570 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
571 CQSPI_REG_CMDCTRL_ADD_BYTES_LSB);
572 reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
573 }
574
575 /* Set up dummy cycles. */
576 dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
577 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
578 return -EOPNOTSUPP;
579
580 if (dummy_clk)
581 reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
582 << CQSPI_REG_CMDCTRL_DUMMY_LSB;
583
Stefan Roese1c60fe72014-11-07 12:37:49 +0100584 if (txlen) {
585 /* writing data = yes */
586 reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
587 reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
588 << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
589
590 wr_len = txlen > 4 ? 4 : txlen;
591 memcpy(&wr_data, txbuf, wr_len);
592 writel(wr_data, reg_base +
593 CQSPI_REG_CMDWRITEDATALOWER);
594
595 if (txlen > 4) {
596 txbuf += wr_len;
597 wr_len = txlen - wr_len;
598 memcpy(&wr_data, txbuf, wr_len);
599 writel(wr_data, reg_base +
600 CQSPI_REG_CMDWRITEDATAUPPER);
601 }
602 }
603
604 /* Execute the command */
605 return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
606}
607
608/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600609int cadence_qspi_apb_read_setup(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530610 const struct spi_mem_op *op)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100611{
612 unsigned int reg;
613 unsigned int rd_reg;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100614 unsigned int dummy_clk;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530615 unsigned int dummy_bytes = op->dummy.nbytes;
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530616 int ret;
617 u8 opcode;
618
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600619 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530620 if (ret)
621 return ret;
622
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600623 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_READ_LSB,
624 priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530625 if (ret)
626 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100627
628 /* Setup the indirect trigger address */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600629 writel(priv->trigger_address,
630 priv->regbase + CQSPI_REG_INDIRECTTRIGGER);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100631
Stefan Roese1c60fe72014-11-07 12:37:49 +0100632 /* Configure the opcode */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600633 if (priv->dtr)
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530634 opcode = op->cmd.opcode >> 8;
635 else
636 opcode = op->cmd.opcode;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100637
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530638 rd_reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600639 rd_reg |= cadence_qspi_calc_rdreg(priv);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100640
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600641 writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100642
Stefan Roese1c60fe72014-11-07 12:37:49 +0100643 if (dummy_bytes) {
Stefan Roese1c60fe72014-11-07 12:37:49 +0100644 /* Convert to clock cycles. */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600645 dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530646
647 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
648 return -ENOTSUPP;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100649
650 if (dummy_clk)
651 rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
652 << CQSPI_REG_RD_INSTR_DUMMY_LSB;
653 }
654
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600655 writel(rd_reg, priv->regbase + CQSPI_REG_RD_INSTR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100656
657 /* set device size */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600658 reg = readl(priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100659 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
Vignesh Raghavendra27516a32020-01-27 10:36:39 +0530660 reg |= (op->addr.nbytes - 1);
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600661 writel(reg, priv->regbase + CQSPI_REG_SIZE);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100662 return 0;
663}
664
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600665static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_priv *priv)
Marek Vasut8c177432016-04-27 23:38:05 +0200666{
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600667 u32 reg = readl(priv->regbase + CQSPI_REG_SDRAMLEVEL);
Marek Vasut8c177432016-04-27 23:38:05 +0200668 reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
669 return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
670}
671
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600672static int cadence_qspi_wait_for_data(struct cadence_spi_priv *priv)
Marek Vasut8c177432016-04-27 23:38:05 +0200673{
674 unsigned int timeout = 10000;
675 u32 reg;
676
677 while (timeout--) {
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600678 reg = cadence_qspi_get_rd_sram_level(priv);
Marek Vasut8c177432016-04-27 23:38:05 +0200679 if (reg)
680 return reg;
681 udelay(1);
682 }
683
684 return -ETIMEDOUT;
685}
686
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530687static int
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600688cadence_qspi_apb_indirect_read_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530689 unsigned int n_rx, u8 *rxbuf)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100690{
Marek Vasut8c177432016-04-27 23:38:05 +0200691 unsigned int remaining = n_rx;
692 unsigned int bytes_to_read = 0;
693 int ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100694
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600695 writel(n_rx, priv->regbase + CQSPI_REG_INDIRECTRDBYTES);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100696
697 /* Start the indirect read transfer */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000698 writel(CQSPI_REG_INDIRECTRD_START,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600699 priv->regbase + CQSPI_REG_INDIRECTRD);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100700
Marek Vasut8c177432016-04-27 23:38:05 +0200701 while (remaining > 0) {
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600702 ret = cadence_qspi_wait_for_data(priv);
Marek Vasut8c177432016-04-27 23:38:05 +0200703 if (ret < 0) {
704 printf("Indirect write timed out (%i)\n", ret);
705 goto failrd;
706 }
Stefan Roese1c60fe72014-11-07 12:37:49 +0100707
Marek Vasut8c177432016-04-27 23:38:05 +0200708 bytes_to_read = ret;
709
710 while (bytes_to_read != 0) {
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600711 bytes_to_read *= priv->fifo_width;
Marek Vasut8c177432016-04-27 23:38:05 +0200712 bytes_to_read = bytes_to_read > remaining ?
713 remaining : bytes_to_read;
Goldschmidt Simon16cbd092018-01-24 10:44:05 +0530714 /*
715 * Handle non-4-byte aligned access to avoid
716 * data abort.
717 */
718 if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600719 readsb(priv->ahbbase, rxbuf, bytes_to_read);
Goldschmidt Simon16cbd092018-01-24 10:44:05 +0530720 else
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600721 readsl(priv->ahbbase, rxbuf,
Goldschmidt Simon16cbd092018-01-24 10:44:05 +0530722 bytes_to_read >> 2);
723 rxbuf += bytes_to_read;
Marek Vasut8c177432016-04-27 23:38:05 +0200724 remaining -= bytes_to_read;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600725 bytes_to_read = cadence_qspi_get_rd_sram_level(priv);
Marek Vasut8c177432016-04-27 23:38:05 +0200726 }
727 }
728
729 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600730 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +0100731 CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
Marek Vasut8c177432016-04-27 23:38:05 +0200732 if (ret) {
733 printf("Indirect read completion error (%i)\n", ret);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100734 goto failrd;
735 }
736
737 /* Clear indirect completion status */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000738 writel(CQSPI_REG_INDIRECTRD_DONE,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600739 priv->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut8c177432016-04-27 23:38:05 +0200740
Marek Vasut84d4f732021-09-14 05:22:31 +0200741 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600742 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
Marek Vasut84d4f732021-09-14 05:22:31 +0200743 CQSPI_REG_INDIRECTRD_DONE, 0, 10, 0);
744 if (ret) {
745 printf("Indirect read clear completion error (%i)\n", ret);
746 goto failrd;
747 }
748
Stefan Roese1c60fe72014-11-07 12:37:49 +0100749 return 0;
750
751failrd:
752 /* Cancel the indirect read */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000753 writel(CQSPI_REG_INDIRECTRD_CANCEL,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600754 priv->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut8c177432016-04-27 23:38:05 +0200755 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100756}
757
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600758int cadence_qspi_apb_read_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530759 const struct spi_mem_op *op)
760{
Vignesh Raghavendra68f82662019-12-05 15:46:06 +0530761 u64 from = op->addr.val;
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530762 void *buf = op->data.buf.in;
763 size_t len = op->data.nbytes;
764
Ashok Reddy Somaf63e6022022-11-29 04:41:34 -0700765 cadence_qspi_apb_enable_linear_mode(true);
T Karthik Reddy3b49fbf2022-05-12 04:05:34 -0600766
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600767 if (priv->use_dac_mode && (from + len < priv->ahbsize)) {
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530768 if (len < 256 ||
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600769 dma_memcpy(buf, priv->ahbbase + from, len) < 0) {
770 memcpy_fromio(buf, priv->ahbbase + from, len);
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530771 }
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600772 if (!cadence_qspi_wait_idle(priv->regbase))
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530773 return -EIO;
774 return 0;
775 }
776
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600777 return cadence_qspi_apb_indirect_read_execute(priv, len, buf);
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530778}
779
/* Opcode + Address (3/4 bytes) */
/*
 * Program the controller for an upcoming (indirect) write operation:
 * protocol/DTR selection, indirect trigger address, write/read instruction
 * registers, start address, optional auto-poll disable, and address width.
 * Returns 0 on success or a negative error code from the protocol setup.
 */
int cadence_qspi_apb_write_setup(struct cadence_spi_priv *priv,
				 const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	u8 opcode;

	/* Derive data/addr bus widths (and DTR state) from the op. */
	ret = cadence_qspi_set_protocol(priv, op);
	if (ret)
		return ret;

	/* Configure the opcode extension used for DTR transfers. */
	ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_WRITE_LSB,
				      priv->dtr);
	if (ret)
		return ret;

	/* Setup the indirect trigger address */
	writel(priv->trigger_address,
	       priv->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	if (priv->dtr)
		/* In DTR mode the command is 2 bytes; the high byte is sent. */
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= priv->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= priv->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, priv->regbase + CQSPI_REG_WR_INSTR);

	/* Keep the read instruction register consistent with the protocol. */
	reg = cadence_qspi_calc_rdreg(priv);
	writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);

	/* Flash address at which the indirect write will start. */
	writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);

	if (priv->dtr) {
		/*
		 * Some flashes like the cypress Semper flash expect a 4-byte
		 * dummy address with the Read SR command in DTR mode, but this
		 * controller does not support sending address with the Read SR
		 * command. So, disable write completion polling on the
		 * controller's side. spi-nor will take care of polling the
		 * status register.
		 */
		reg = readl(priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
	}

	/* Program the number of address bytes (register holds nbytes - 1). */
	reg = readl(priv->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, priv->regbase + CQSPI_REG_SIZE);
	return 0;
}
837
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530838static int
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600839cadence_qspi_apb_indirect_write_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530840 unsigned int n_tx, const u8 *txbuf)
Stefan Roese1c60fe72014-11-07 12:37:49 +0100841{
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600842 unsigned int page_size = priv->page_size;
Marek Vasutdae51dd2016-04-27 23:18:55 +0200843 unsigned int remaining = n_tx;
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530844 const u8 *bb_txbuf = txbuf;
845 void *bounce_buf = NULL;
Marek Vasutdae51dd2016-04-27 23:18:55 +0200846 unsigned int write_bytes;
847 int ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100848
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530849 /*
850 * Use bounce buffer for non 32 bit aligned txbuf to avoid data
851 * aborts
852 */
853 if ((uintptr_t)txbuf % 4) {
854 bounce_buf = malloc(n_tx);
855 if (!bounce_buf)
856 return -ENOMEM;
857 memcpy(bounce_buf, txbuf, n_tx);
858 bb_txbuf = bounce_buf;
859 }
860
Stefan Roese1c60fe72014-11-07 12:37:49 +0100861 /* Configure the indirect read transfer bytes */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600862 writel(n_tx, priv->regbase + CQSPI_REG_INDIRECTWRBYTES);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100863
864 /* Start the indirect write transfer */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000865 writel(CQSPI_REG_INDIRECTWR_START,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600866 priv->regbase + CQSPI_REG_INDIRECTWR);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100867
Pratyush Yadav8dcf3e22021-06-26 00:47:08 +0530868 /*
869 * Some delay is required for the above bit to be internally
870 * synchronized by the QSPI module.
871 */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600872 ndelay(priv->wr_delay);
Pratyush Yadav8dcf3e22021-06-26 00:47:08 +0530873
Marek Vasutdae51dd2016-04-27 23:18:55 +0200874 while (remaining > 0) {
875 write_bytes = remaining > page_size ? page_size : remaining;
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600876 writesl(priv->ahbbase, bb_txbuf, write_bytes >> 2);
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530877 if (write_bytes % 4)
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600878 writesb(priv->ahbbase,
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530879 bb_txbuf + rounddown(write_bytes, 4),
880 write_bytes % 4);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100881
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600882 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_SDRAMLEVEL,
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +0100883 CQSPI_REG_SDRAMLEVEL_WR_MASK <<
884 CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
Marek Vasutdae51dd2016-04-27 23:18:55 +0200885 if (ret) {
886 printf("Indirect write timed out (%i)\n", ret);
887 goto failwr;
888 }
Stefan Roese1c60fe72014-11-07 12:37:49 +0100889
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530890 bb_txbuf += write_bytes;
Marek Vasutdae51dd2016-04-27 23:18:55 +0200891 remaining -= write_bytes;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100892 }
893
Marek Vasutdae51dd2016-04-27 23:18:55 +0200894 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600895 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
Álvaro Fernández Rojas918de032018-01-23 17:14:55 +0100896 CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
Marek Vasutdae51dd2016-04-27 23:18:55 +0200897 if (ret) {
898 printf("Indirect write completion error (%i)\n", ret);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100899 goto failwr;
900 }
901
902 /* Clear indirect completion status */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000903 writel(CQSPI_REG_INDIRECTWR_DONE,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600904 priv->regbase + CQSPI_REG_INDIRECTWR);
Marek Vasut84d4f732021-09-14 05:22:31 +0200905
906 /* Check indirect done status */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600907 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
Marek Vasut84d4f732021-09-14 05:22:31 +0200908 CQSPI_REG_INDIRECTWR_DONE, 0, 10, 0);
909 if (ret) {
910 printf("Indirect write clear completion error (%i)\n", ret);
911 goto failwr;
912 }
913
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530914 if (bounce_buf)
915 free(bounce_buf);
Stefan Roese1c60fe72014-11-07 12:37:49 +0100916 return 0;
917
918failwr:
919 /* Cancel the indirect write */
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000920 writel(CQSPI_REG_INDIRECTWR_CANCEL,
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600921 priv->regbase + CQSPI_REG_INDIRECTWR);
Vignesh Rad4bd8a2018-01-24 10:44:07 +0530922 if (bounce_buf)
923 free(bounce_buf);
Marek Vasutdae51dd2016-04-27 23:18:55 +0200924 return ret;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100925}
926
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600927int cadence_qspi_apb_write_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530928 const struct spi_mem_op *op)
929{
930 u32 to = op->addr.val;
931 const void *buf = op->data.buf.out;
932 size_t len = op->data.nbytes;
933
Pratyush Yadave1814ad2021-06-26 00:47:09 +0530934 /*
935 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
936 * address (all 0s) with the read status register command in DTR mode.
937 * But this controller does not support sending dummy address bytes to
938 * the flash when it is polling the write completion register in DTR
939 * mode. So, we can not use direct mode when in DTR mode for writing
940 * data.
941 */
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600942 cadence_qspi_apb_enable_linear_mode(true);
943 if (!priv->dtr && priv->use_dac_mode && (to + len < priv->ahbsize)) {
944 memcpy_toio(priv->ahbbase + to, buf, len);
945 if (!cadence_qspi_wait_idle(priv->regbase))
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530946 return -EIO;
947 return 0;
948 }
949
Ashok Reddy Somaf5817652022-08-24 05:38:47 -0600950 return cadence_qspi_apb_indirect_write_execute(priv, len, buf);
Vignesh Raghavendra6b7df222020-01-27 10:36:40 +0530951}
952
Stefan Roese1c60fe72014-11-07 12:37:49 +0100953void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
954{
955 unsigned int reg;
956
957 /* enter XiP mode immediately and enable direct mode */
958 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy3a5ae122016-11-29 12:58:30 +0000959 reg |= CQSPI_REG_CONFIG_ENABLE;
960 reg |= CQSPI_REG_CONFIG_DIRECT;
961 reg |= CQSPI_REG_CONFIG_XIP_IMM;
Stefan Roese1c60fe72014-11-07 12:37:49 +0100962 writel(reg, reg_base + CQSPI_REG_CONFIG);
963
964 /* keep the XiP mode */
965 writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);
966
967 /* Enable mode bit at devrd */
968 reg = readl(reg_base + CQSPI_REG_RD_INSTR);
969 reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
970 writel(reg, reg_base + CQSPI_REG_RD_INSTR);
971}