/*
 * Copyright (C) 2012 Altera Corporation <www.altera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  - Neither the name of the Altera Corporation nor the
 *    names of its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <common.h>
#include <asm/io.h>
#include <dma.h>
#include <linux/errno.h>
#include <wait_bit.h>
#include <spi.h>
#include <spi-mem.h>
#include <malloc.h>
#include "cadence_qspi.h"

#define CQSPI_REG_POLL_US			1 /* 1us */
#define CQSPI_REG_RETRY				10000
#define CQSPI_POLL_IDLE_RETRY			3

/* Transfer mode */
#define CQSPI_INST_TYPE_SINGLE			0
#define CQSPI_INST_TYPE_DUAL			1
#define CQSPI_INST_TYPE_QUAD			2
#define CQSPI_INST_TYPE_OCTAL			3

#define CQSPI_STIG_DATA_LEN_MAX			8

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_BYTES_MAX			4

/****************************************************************************
 * Controller's configuration and status register (offset from QSPI_BASE)
 ****************************************************************************/
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE			BIT(0)
#define CQSPI_REG_CONFIG_CLK_POL		BIT(1)
#define CQSPI_REG_CONFIG_CLK_PHA		BIT(2)
#define CQSPI_REG_CONFIG_DIRECT			BIT(7)
#define CQSPI_REG_CONFIG_DECODE			BIT(9)
#define CQSPI_REG_CONFIG_XIP_IMM		BIT(18)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_RD_DATA_CAPTURE		0x10
#define CQSPI_REG_RD_DATA_CAPTURE_BYPASS	BIT(0)
#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB	1
#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL		BIT(1)
#define CQSPI_REG_INDIRECTRD_INPROGRESS		BIT(2)
#define CQSPI_REG_INDIRECTRD_DONE		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS		BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_OPCODE_MASK		0xFF

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL		BIT(1)
#define CQSPI_REG_INDIRECTWR_INPROGRESS		BIT(2)
#define CQSPI_REG_INDIRECTWR_DONE		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_IS_IDLE(base)						\
	((readl(base + CQSPI_REG_CONFIG) >>				\
		CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)

#define CQSPI_GET_RD_SRAM_LEVEL(reg_base)				\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>			\
	CQSPI_REG_SDRAMLEVEL_RD_LSB) & CQSPI_REG_SDRAMLEVEL_RD_MASK)

#define CQSPI_GET_WR_SRAM_LEVEL(reg_base)				\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>			\
	CQSPI_REG_SDRAMLEVEL_WR_LSB) & CQSPI_REG_SDRAMLEVEL_WR_MASK)

void cadence_qspi_apb_controller_enable(void *reg_base)
{
	unsigned int reg;
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENABLE;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

void cadence_qspi_apb_controller_disable(void *reg_base)
{
	unsigned int reg;
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_ENABLE;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

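/* Enable Direct Access Controller (memory-mapped) mode in the config register. */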
void cadence_qspi_apb_dac_mode_enable(void *reg_base)
{
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DIRECT;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

/* Return 1 if idle, otherwise return 0 (busy). */
static unsigned int cadence_qspi_wait_idle(void *reg_base)
{
	unsigned int start, count = 0;
	/* timeout in unit of ms */
	unsigned int timeout = 5000;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (CQSPI_REG_IS_IDLE(reg_base))
			count++;
		else
			count = 0;
		/*
		 * Ensure the QSPI controller is in true idle state after
		 * reading back the same idle status consecutively
		 */
		if (count >= CQSPI_POLL_IDLE_RETRY)
			return 1;
	}

	/* Timeout, still in busy mode. */
	printf("QSPI: QSPI is still busy after %u ms timeout.\n", timeout);
	return 0;
}

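/*
 * Program the read data capture logic: optionally set the bypass bit and
 * program the read-sample delay (in ref_clk cycles) used to capture
 * incoming data.
 */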
void cadence_qspi_apb_readdata_capture(void *reg_base,
				unsigned int bypass, unsigned int delay)
{
	unsigned int reg;
	cadence_qspi_apb_controller_disable(reg_base);

	reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);

	if (bypass)
		reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
	else
		reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;

	reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
		<< CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
		<< CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_config_baudrate_div(void *reg_base,
	unsigned int ref_clk_hz, unsigned int sclk_hz)
{
	unsigned int reg;
	unsigned int div;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);

	/*
	 * The baud_div field in the config reg is 4 bits, and the ref clock is
	 * divided by 2 * (baud_div + 1). Round up the divider to ensure the
	 * SPI clock rate is less than or equal to the requested clock rate.
	 */
	div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;

	/* ensure the baud rate doesn't exceed the max value */
	if (div > CQSPI_REG_CONFIG_BAUD_MASK)
		div = CQSPI_REG_CONFIG_BAUD_MASK;

	debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
	      ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));

	reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

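/* Set SPI clock polarity (CPOL) and phase (CPHA) from the requested SPI mode. */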
void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);

	if (mode & SPI_CPOL)
		reg |= CQSPI_REG_CONFIG_CLK_POL;
	if (mode & SPI_CPHA)
		reg |= CQSPI_REG_CONFIG_CLK_PHA;

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

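/*
 * Select the flash device: either pass the chip-select through the external
 * decoder, or convert it to the one-hot (active-low) encoding the controller
 * expects when no decoder is used.
 */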
void cadence_qspi_apb_chipselect(void *reg_base,
	unsigned int chip_select, unsigned int decoder_enable)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	debug("%s : chipselect %d decode %d\n", __func__, chip_select,
	      decoder_enable);

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	/* decoder */
	if (decoder_enable) {
		reg |= CQSPI_REG_CONFIG_DECODE;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE;
		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
			<< CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
			<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

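/*
 * Program the device delay register: convert the tshsl, tsd2d, tchsh and
 * tslch timings from nanoseconds into ref_clk cycles.
 */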
void cadence_qspi_apb_delay(void *reg_base,
	unsigned int ref_clk, unsigned int sclk_hz,
	unsigned int tshsl_ns, unsigned int tsd2d_ns,
	unsigned int tchsh_ns, unsigned int tslch_ns)
{
	unsigned int ref_clk_ns;
	unsigned int sclk_ns;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	/* Convert to ns. */
	ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);

	/* Convert to ns. */
	sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);

	/* The controller adds additional delay to that programmed in the reg */
	if (tshsl_ns >= sclk_ns + ref_clk_ns)
		tshsl_ns -= sclk_ns + ref_clk_ns;
	if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
		tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
	tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
	tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
	tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
	tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);

	reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
			<< CQSPI_REG_DELAY_TSHSL_LSB);
	reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
			<< CQSPI_REG_DELAY_TCHSH_LSB);
	reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
			<< CQSPI_REG_DELAY_TSLCH_LSB);
	reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
			<< CQSPI_REG_DELAY_TSD2D_LSB);
	writel(reg, reg_base + CQSPI_REG_DELAY);

	cadence_qspi_apb_controller_enable(reg_base);
}

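/*
 * One-time controller setup: program the flash page/block geometry, clear
 * the AHB remap, split the SRAM between the read and write partitions, and
 * mask all interrupts (the driver polls for completion).
 */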
void cadence_qspi_apb_controller_init(struct cadence_spi_platdata *plat)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(plat->regbase);

	/* Configure the device size and address bytes */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	/* Clear the previous value */
	reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
	reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
	reg |= (plat->page_size << CQSPI_REG_SIZE_PAGE_LSB);
	reg |= (plat->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);

	/* Configure the remap address register, no remap */
	writel(0, plat->regbase + CQSPI_REG_REMAP);

	/* Indirect mode configurations */
	writel(plat->fifo_depth / 2, plat->regbase + CQSPI_REG_SRAMPARTITION);

	/* Disable all interrupts */
	writel(0, plat->regbase + CQSPI_REG_IRQMASK);

	cadence_qspi_apb_controller_enable(plat->regbase);
}

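/*
 * Execute a software-triggered (STIG) flash command that has been assembled
 * in the CMDCTRL register image, then wait for the controller to go idle.
 */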
static int cadence_qspi_apb_exec_flash_cmd(void *reg_base,
	unsigned int reg)
{
	unsigned int retry = CQSPI_REG_RETRY;

	/* Write the CMDCTRL without start execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execute */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	while (retry--) {
		reg = readl(reg_base + CQSPI_REG_CMDCTRL);
		if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
			break;
		udelay(1);
	}

	/* Timeout if the command is still in progress. */
	if (reg & CQSPI_REG_CMDCTRL_INPROGRESS) {
		printf("QSPI: flash command execution timeout\n");
		return -EIO;
	}

	/* Polling QSPI idle status. */
	if (!cadence_qspi_wait_idle(reg_base))
		return -EIO;

	return 0;
}

/* For command RDID, RDSR. */
int cadence_qspi_apb_command_read(void *reg_base, const struct spi_mem_op *op)
{
	unsigned int reg;
	unsigned int read_len;
	int status;
	unsigned int rxlen = op->data.nbytes;
	void *rxbuf = op->data.buf.in;

	if (rxlen > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		printf("QSPI: Invalid input arguments rxlen %u\n", rxlen);
		return -EINVAL;
	}

	reg = op->cmd.opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
	if (status != 0)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (rxlen > 4) ? 4 : rxlen;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (rxlen > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = rxlen - read_len;
		memcpy(rxbuf, &reg, read_len);
	}
	return 0;
}

/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
int cadence_qspi_apb_command_write(void *reg_base, const struct spi_mem_op *op)
{
	unsigned int reg = 0;
	unsigned int wr_data;
	unsigned int wr_len;
	unsigned int txlen = op->data.nbytes;
	const void *txbuf = op->data.buf.out;
	u32 addr;

	/* Reorder address to SPI bus order if only transferring address */
	if (!txlen) {
		addr = cpu_to_be32(op->addr.val);
		if (op->addr.nbytes == 3)
			addr >>= 8;
		txbuf = &addr;
		txlen = op->addr.nbytes;
	}

	if (txlen > CQSPI_STIG_DATA_LEN_MAX) {
		printf("QSPI: Invalid input arguments txlen %u\n", txlen);
		return -EINVAL;
	}

	reg |= op->cmd.opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (txlen) {
		/* writing data = yes */
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;

		wr_len = txlen > 4 ? 4 : txlen;
		memcpy(&wr_data, txbuf, wr_len);
		writel(wr_data, reg_base +
			CQSPI_REG_CMDWRITEDATALOWER);

		if (txlen > 4) {
			txbuf += wr_len;
			wr_len = txlen - wr_len;
			memcpy(&wr_data, txbuf, wr_len);
			writel(wr_data, reg_base +
				CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	/* Execute the command */
	return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
}

/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
int cadence_qspi_apb_read_setup(struct cadence_spi_platdata *plat,
				const struct spi_mem_op *op)
{
	unsigned int reg;
	unsigned int rd_reg;
	unsigned int dummy_clk;
	unsigned int dummy_bytes = op->dummy.nbytes;

	/* Setup the indirect trigger address */
	writel(plat->trigger_address,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	rd_reg = op->cmd.opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;

	if (op->data.buswidth == 8)
		/* Instruction and address at DQ0, data at DQ0-7. */
		rd_reg |= CQSPI_INST_TYPE_OCTAL << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
	else if (op->data.buswidth == 4)
		/* Instruction and address at DQ0, data at DQ0-3. */
		rd_reg |= CQSPI_INST_TYPE_QUAD << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);

	if (dummy_bytes) {
		if (dummy_bytes > CQSPI_DUMMY_BYTES_MAX)
			dummy_bytes = CQSPI_DUMMY_BYTES_MAX;

		/* Convert to clock cycles. */
		dummy_clk = dummy_bytes * CQSPI_DUMMY_CLKS_PER_BYTE;

		if (dummy_clk)
			rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
				<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(rd_reg, plat->regbase + CQSPI_REG_RD_INSTR);

	/* set device size */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}

static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_platdata *plat)
{
	u32 reg = readl(plat->regbase + CQSPI_REG_SDRAMLEVEL);
	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static int cadence_qspi_wait_for_data(struct cadence_spi_platdata *plat)
{
	unsigned int timeout = 10000;
	u32 reg;

	while (timeout--) {
		reg = cadence_qspi_get_rd_sram_level(plat);
		if (reg)
			return reg;
		udelay(1);
	}

	return -ETIMEDOUT;
}

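/*
 * Indirect (SRAM FIFO) read: start the transfer, then drain the read FIFO
 * through the AHB trigger window until all requested bytes have been copied.
 */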
static int
cadence_qspi_apb_indirect_read_execute(struct cadence_spi_platdata *plat,
				       unsigned int n_rx, u8 *rxbuf)
{
	unsigned int remaining = n_rx;
	unsigned int bytes_to_read = 0;
	int ret;

	writel(n_rx, plat->regbase + CQSPI_REG_INDIRECTRDBYTES);

	/* Start the indirect read transfer */
	writel(CQSPI_REG_INDIRECTRD_START,
	       plat->regbase + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		ret = cadence_qspi_wait_for_data(plat);
		if (ret < 0) {
			printf("Indirect read timed out (%i)\n", ret);
			goto failrd;
		}

		bytes_to_read = ret;

		while (bytes_to_read != 0) {
			bytes_to_read *= plat->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			/*
			 * Handle non-4-byte aligned access to avoid
			 * data abort.
			 */
			if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
				readsb(plat->ahbbase, rxbuf, bytes_to_read);
			else
				readsl(plat->ahbbase, rxbuf,
				       bytes_to_read >> 2);
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cadence_qspi_get_rd_sram_level(plat);
		}
	}

	/* Check indirect done status */
	ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTRD,
				CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE,
	       plat->regbase + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL,
	       plat->regbase + CQSPI_REG_INDIRECTRD);
	return ret;
}

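/*
 * Read dispatcher: use the memory-mapped DAC window when it covers the
 * requested range (with DMA for larger transfers when available), otherwise
 * fall back to indirect mode.
 */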
int cadence_qspi_apb_read_execute(struct cadence_spi_platdata *plat,
				  const struct spi_mem_op *op)
{
	u64 from = op->addr.val;
	void *buf = op->data.buf.in;
	size_t len = op->data.nbytes;

	if (plat->use_dac_mode && (from + len < plat->ahbsize)) {
		if (len < 256 ||
		    dma_memcpy(buf, plat->ahbbase + from, len) < 0) {
			memcpy_fromio(buf, plat->ahbbase + from, len);
		}
		if (!cadence_qspi_wait_idle(plat->regbase))
			return -EIO;
		return 0;
	}

	return cadence_qspi_apb_indirect_read_execute(plat, len, buf);
}

/* Opcode + Address (3/4 bytes) */
int cadence_qspi_apb_write_setup(struct cadence_spi_platdata *plat,
				 const struct spi_mem_op *op)
{
	unsigned int reg;

	/* Setup the indirect trigger address */
	writel(plat->trigger_address,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	reg = op->cmd.opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	writel(reg, plat->regbase + CQSPI_REG_WR_INSTR);

	writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);

	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}

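/*
 * Indirect (SRAM FIFO) write: push page-sized chunks through the AHB trigger
 * window. A bounce buffer is used when the source buffer is not 32-bit
 * aligned, to avoid data aborts.
 */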
static int
cadence_qspi_apb_indirect_write_execute(struct cadence_spi_platdata *plat,
					unsigned int n_tx, const u8 *txbuf)
{
	unsigned int page_size = plat->page_size;
	unsigned int remaining = n_tx;
	const u8 *bb_txbuf = txbuf;
	void *bounce_buf = NULL;
	unsigned int write_bytes;
	int ret;

	/*
	 * Use bounce buffer for non 32 bit aligned txbuf to avoid data
	 * aborts
	 */
	if ((uintptr_t)txbuf % 4) {
		bounce_buf = malloc(n_tx);
		if (!bounce_buf)
			return -ENOMEM;
		memcpy(bounce_buf, txbuf, n_tx);
		bb_txbuf = bounce_buf;
	}

	/* Configure the indirect write transfer bytes */
	writel(n_tx, plat->regbase + CQSPI_REG_INDIRECTWRBYTES);

	/* Start the indirect write transfer */
	writel(CQSPI_REG_INDIRECTWR_START,
	       plat->regbase + CQSPI_REG_INDIRECTWR);

	while (remaining > 0) {
		write_bytes = remaining > page_size ? page_size : remaining;
		writesl(plat->ahbbase, bb_txbuf, write_bytes >> 2);
		if (write_bytes % 4)
			writesb(plat->ahbbase,
				bb_txbuf + rounddown(write_bytes, 4),
				write_bytes % 4);

		ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_SDRAMLEVEL,
					CQSPI_REG_SDRAMLEVEL_WR_MASK <<
					CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
		if (ret) {
			printf("Indirect write timed out (%i)\n", ret);
			goto failwr;
		}

		bb_txbuf += write_bytes;
		remaining -= write_bytes;
	}

	/* Check indirect done status */
	ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTWR,
				CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE,
	       plat->regbase + CQSPI_REG_INDIRECTWR);
	if (bounce_buf)
		free(bounce_buf);
	return 0;

failwr:
	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL,
	       plat->regbase + CQSPI_REG_INDIRECTWR);
	if (bounce_buf)
		free(bounce_buf);
	return ret;
}

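/*
 * Write dispatcher: use the memory-mapped DAC window when it covers the
 * requested range, otherwise fall back to indirect mode.
 */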
int cadence_qspi_apb_write_execute(struct cadence_spi_platdata *plat,
				   const struct spi_mem_op *op)
{
	u32 to = op->addr.val;
	const void *buf = op->data.buf.out;
	size_t len = op->data.nbytes;

	if (plat->use_dac_mode && (to + len < plat->ahbsize)) {
		memcpy_toio(plat->ahbbase + to, buf, len);
		if (!cadence_qspi_wait_idle(plat->regbase))
			return -EIO;
		return 0;
	}

	return cadence_qspi_apb_indirect_write_execute(plat, len, buf);
}

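/*
 * Switch the controller into XiP mode: enable direct access with immediate
 * XiP entry, and program the mode bits sent after the address phase.
 */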
void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
{
	unsigned int reg;

	/* enter XiP mode immediately and enable direct mode */
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENABLE;
	reg |= CQSPI_REG_CONFIG_DIRECT;
	reg |= CQSPI_REG_CONFIG_XIP_IMM;
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	/* keep the XiP mode */
	writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);

	/* Enable mode bit at devrd */
	reg = readl(reg_base + CQSPI_REG_RD_INSTR);
	reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);
}