/*
 * Copyright (C) 2012 Altera Corporation <www.altera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  - Neither the name of the Altera Corporation nor the
 *    names of its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <common.h>
#include <log.h>
#include <asm/io.h>
#include <dma.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <wait_bit.h>
#include <spi.h>
#include <spi-mem.h>
#include <malloc.h>
#include "cadence_qspi.h"

#define CQSPI_REG_POLL_US			1 /* 1us */
#define CQSPI_REG_RETRY				10000
#define CQSPI_POLL_IDLE_RETRY			3

/* Transfer mode */
#define CQSPI_INST_TYPE_SINGLE			0
#define CQSPI_INST_TYPE_DUAL			1
#define CQSPI_INST_TYPE_QUAD			2
#define CQSPI_INST_TYPE_OCTAL			3

#define CQSPI_STIG_DATA_LEN_MAX			8

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_CLKS_MAX			31

/****************************************************************************
 * Controller's configuration and status register (offset from QSPI_BASE)
 ****************************************************************************/
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE			BIT(0)
#define CQSPI_REG_CONFIG_CLK_POL		BIT(1)
#define CQSPI_REG_CONFIG_CLK_PHA		BIT(2)
#define CQSPI_REG_CONFIG_DIRECT			BIT(7)
#define CQSPI_REG_CONFIG_DECODE			BIT(9)
#define CQSPI_REG_CONFIG_XIP_IMM		BIT(18)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_DTR_PROTO		BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE		BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_RD_DATA_CAPTURE		0x10
#define CQSPI_REG_RD_DATA_CAPTURE_BYPASS	BIT(0)
#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB	1
#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL		0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL		BIT(14)

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL		BIT(1)
#define CQSPI_REG_INDIRECTRD_INPROGRESS		BIT(2)
#define CQSPI_REG_INDIRECTRD_DONE		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS		BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_OPCODE_MASK		0xFF

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL		BIT(1)
#define CQSPI_REG_INDIRECTWR_INPROGRESS		BIT(2)
#define CQSPI_REG_INDIRECTWR_DONE		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_OP_EXT_LOWER			0xE0
#define CQSPI_REG_OP_EXT_READ_LSB		24
#define CQSPI_REG_OP_EXT_WRITE_LSB		16
#define CQSPI_REG_OP_EXT_STIG_LSB		0

#define CQSPI_REG_IS_IDLE(base)						\
	((readl(base + CQSPI_REG_CONFIG) >>				\
		CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)

#define CQSPI_GET_RD_SRAM_LEVEL(reg_base)				\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>			\
	CQSPI_REG_SDRAMLEVEL_RD_LSB) & CQSPI_REG_SDRAMLEVEL_RD_MASK)

#define CQSPI_GET_WR_SRAM_LEVEL(reg_base)				\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>			\
	CQSPI_REG_SDRAMLEVEL_WR_LSB) & CQSPI_REG_SDRAMLEVEL_WR_MASK)

void cadence_qspi_apb_controller_enable(void *reg_base)
{
	unsigned int reg;
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENABLE;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

void cadence_qspi_apb_controller_disable(void *reg_base)
{
	unsigned int reg;
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_ENABLE;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

void cadence_qspi_apb_dac_mode_enable(void *reg_base)
{
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DIRECT;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static unsigned int cadence_qspi_calc_dummy(const struct spi_mem_op *op,
					    bool dtr)
{
	unsigned int dummy_clk;

	if (!op->dummy.nbytes || !op->dummy.buswidth)
		return 0;

	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (dtr)
		dummy_clk /= 2;

	return dummy_clk;
}

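/*
 * Worked example (numbers are illustrative only): a read op with 3 dummy
 * bytes on a 4-bit wide dummy phase needs 3 * (8 / 4) = 6 dummy clocks in
 * STR mode; with DTR enabled the same op needs only 6 / 2 = 3 clocks, since
 * data is sampled on both clock edges.
 */
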
static u32 cadence_qspi_calc_rdreg(struct cadence_spi_plat *plat)
{
	u32 rdreg = 0;

	rdreg |= plat->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= plat->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= plat->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}

static int cadence_qspi_buswidth_to_inst_type(u8 buswidth)
{
	switch (buswidth) {
	case 0:
	case 1:
		return CQSPI_INST_TYPE_SINGLE;

	case 2:
		return CQSPI_INST_TYPE_DUAL;

	case 4:
		return CQSPI_INST_TYPE_QUAD;

	case 8:
		return CQSPI_INST_TYPE_OCTAL;

	default:
		return -ENOTSUPP;
	}
}

static int cadence_qspi_set_protocol(struct cadence_spi_plat *plat,
				     const struct spi_mem_op *op)
{
	int ret;

	plat->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;

	ret = cadence_qspi_buswidth_to_inst_type(op->cmd.buswidth);
	if (ret < 0)
		return ret;
	plat->inst_width = ret;

	ret = cadence_qspi_buswidth_to_inst_type(op->addr.buswidth);
	if (ret < 0)
		return ret;
	plat->addr_width = ret;

	ret = cadence_qspi_buswidth_to_inst_type(op->data.buswidth);
	if (ret < 0)
		return ret;
	plat->data_width = ret;

	return 0;
}

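/*
 * Illustrative mapping: a 1-1-4 quad-output read op (command and address on
 * one line, data on four) resolves to inst_width = CQSPI_INST_TYPE_SINGLE,
 * addr_width = CQSPI_INST_TYPE_SINGLE and data_width = CQSPI_INST_TYPE_QUAD.
 * plat->dtr is only set when the command, address and data phases all
 * request DTR.
 */
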
/* Return 1 if idle, otherwise return 0 (busy). */
static unsigned int cadence_qspi_wait_idle(void *reg_base)
{
	unsigned int start, count = 0;
	/* timeout in unit of ms */
	unsigned int timeout = 5000;

	start = get_timer(0);
	for ( ; get_timer(start) < timeout ; ) {
		if (CQSPI_REG_IS_IDLE(reg_base))
			count++;
		else
			count = 0;
		/*
		 * Ensure the QSPI controller is in true idle state after
		 * reading back the same idle status consecutively
		 */
		if (count >= CQSPI_POLL_IDLE_RETRY)
			return 1;
	}

	/* Timeout, still in busy mode. */
	printf("QSPI: controller is still busy after %u ms timeout\n",
	       timeout);
	return 0;
}

void cadence_qspi_apb_readdata_capture(void *reg_base,
				unsigned int bypass, unsigned int delay)
{
	unsigned int reg;
	cadence_qspi_apb_controller_disable(reg_base);

	reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);

	if (bypass)
		reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
	else
		reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;

	reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
		<< CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
		<< CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_config_baudrate_div(void *reg_base,
		unsigned int ref_clk_hz, unsigned int sclk_hz)
{
	unsigned int reg;
	unsigned int div;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);

	/*
	 * The baud_div field in the config reg is 4 bits, and the ref clock is
	 * divided by 2 * (baud_div + 1). Round up the divider to ensure the
	 * SPI clock rate is less than or equal to the requested clock rate.
	 */
	div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;

	/* ensure the baud rate doesn't exceed the max value */
	if (div > CQSPI_REG_CONFIG_BAUD_MASK)
		div = CQSPI_REG_CONFIG_BAUD_MASK;

	debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
	      ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));

	reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

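/*
 * Worked example (numbers are illustrative): with ref_clk_hz = 400 MHz and a
 * requested sclk_hz = 50 MHz, div = DIV_ROUND_UP(400M, 100M) - 1 = 3 and the
 * actual SCLK is 400M / (2 * (3 + 1)) = 50 MHz. Requesting 60 MHz instead
 * still yields div = 3 (50 MHz): the divider always rounds the SPI clock
 * down so it never exceeds the requested rate.
 */
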
void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);

	if (mode & SPI_CPOL)
		reg |= CQSPI_REG_CONFIG_CLK_POL;
	if (mode & SPI_CPHA)
		reg |= CQSPI_REG_CONFIG_CLK_PHA;

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_chipselect(void *reg_base,
	unsigned int chip_select, unsigned int decoder_enable)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	debug("%s : chipselect %d decode %d\n", __func__, chip_select,
	      decoder_enable);

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	/* decoder */
	if (decoder_enable) {
		reg |= CQSPI_REG_CONFIG_DECODE;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE;
		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
			<< CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
			<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_delay(void *reg_base,
	unsigned int ref_clk, unsigned int sclk_hz,
	unsigned int tshsl_ns, unsigned int tsd2d_ns,
	unsigned int tchsh_ns, unsigned int tslch_ns)
{
	unsigned int ref_clk_ns;
	unsigned int sclk_ns;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	/* Convert to ns. */
	ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);

	/* Convert to ns. */
	sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);

	/* The controller adds additional delay to that programmed in the reg */
	if (tshsl_ns >= sclk_ns + ref_clk_ns)
		tshsl_ns -= sclk_ns + ref_clk_ns;
	if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
		tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
	tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
	tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
	tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
	tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);

	reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
			<< CQSPI_REG_DELAY_TSHSL_LSB);
	reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
			<< CQSPI_REG_DELAY_TCHSH_LSB);
	reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
			<< CQSPI_REG_DELAY_TSLCH_LSB);
	reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
			<< CQSPI_REG_DELAY_TSD2D_LSB);
	writel(reg, reg_base + CQSPI_REG_DELAY);

	cadence_qspi_apb_controller_enable(reg_base);
}

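/*
 * Worked example (illustrative): with ref_clk = 400 MHz (ref_clk_ns = 3) and
 * sclk_hz = 50 MHz (sclk_ns = 20), a requested tshsl_ns of 50 ns is first
 * reduced by the delay the controller inserts on its own (20 + 3 = 23 ns)
 * and then rounded up to reference clocks: DIV_ROUND_UP(27, 3) = 9 is what
 * ends up in the TSHSL field.
 */
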
void cadence_qspi_apb_controller_init(struct cadence_spi_plat *plat)
{
	unsigned reg;

	cadence_qspi_apb_controller_disable(plat->regbase);

	/* Configure the device size and address bytes */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	/* Clear the previous value */
	reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
	reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
	reg |= (plat->page_size << CQSPI_REG_SIZE_PAGE_LSB);
	reg |= (plat->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);

	/* Configure the remap address register, no remap */
	writel(0, plat->regbase + CQSPI_REG_REMAP);

	/* Indirect mode configurations */
	writel(plat->fifo_depth / 2, plat->regbase + CQSPI_REG_SRAMPARTITION);

	/* Disable all interrupts */
	writel(0, plat->regbase + CQSPI_REG_IRQMASK);

	cadence_qspi_apb_controller_enable(plat->regbase);
}

static int cadence_qspi_apb_exec_flash_cmd(void *reg_base,
	unsigned int reg)
{
	unsigned int retry = CQSPI_REG_RETRY;

	/* Write the CMDCTRL without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execution */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	while (retry--) {
		reg = readl(reg_base + CQSPI_REG_CMDCTRL);
		if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
			break;
		udelay(1);
	}

	if (!retry) {
		printf("QSPI: flash command execution timeout\n");
		return -EIO;
	}

	/* Polling QSPI idle status. */
	if (!cadence_qspi_wait_idle(reg_base))
		return -EIO;

	return 0;
}

static int cadence_qspi_setup_opcode_ext(struct cadence_spi_plat *plat,
					 const struct spi_mem_op *op,
					 unsigned int shift)
{
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/* Opcode extension is the LSB. */
	ext = op->cmd.opcode & 0xff;

	reg = readl(plat->regbase + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, plat->regbase + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}

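/*
 * Example (illustrative): for a two-byte DTR command whose op->cmd.opcode is
 * 0x05fa, the primary opcode 0x05 (upper byte) is programmed into the
 * relevant instruction register elsewhere in this file, while the extension
 * byte 0xfa written here lands in the OP_EXT_LOWER field selected by 'shift'
 * (the STIG, read or write slot).
 */
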
static int cadence_qspi_enable_dtr(struct cadence_spi_plat *plat,
				   const struct spi_mem_op *op,
				   unsigned int shift,
				   bool enable)
{
	unsigned int reg;
	int ret;

	reg = readl(plat->regbase + CQSPI_REG_CONFIG);

	if (enable) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up command opcode extension. */
		ret = cadence_qspi_setup_opcode_ext(plat, op, shift);
		if (ret)
			return ret;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
		reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
	}

	writel(reg, plat->regbase + CQSPI_REG_CONFIG);

	return 0;
}

int cadence_qspi_apb_command_read_setup(struct cadence_spi_plat *plat,
					const struct spi_mem_op *op)
{
	int ret;
	unsigned int reg;

	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_STIG_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	reg = cadence_qspi_calc_rdreg(plat);
	writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);

	return 0;
}

/* For commands RDID, RDSR. */
int cadence_qspi_apb_command_read(struct cadence_spi_plat *plat,
				  const struct spi_mem_op *op)
{
	void *reg_base = plat->regbase;
	unsigned int reg;
	unsigned int read_len;
	int status;
	unsigned int rxlen = op->data.nbytes;
	void *rxbuf = op->data.buf.in;
	unsigned int dummy_clk;
	u8 opcode;

	if (rxlen > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		printf("QSPI: Invalid input arguments rxlen %u\n", rxlen);
		return -EINVAL;
	}

	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	/* Set up dummy cycles. */
	dummy_clk = cadence_qspi_calc_dummy(op, plat->dtr);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -ENOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
			<< CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
	if (status != 0)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (rxlen > 4) ? 4 : rxlen;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (rxlen > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = rxlen - read_len;
		memcpy(rxbuf, &reg, read_len);
	}
	return 0;
}

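/*
 * Illustrative STIG read: a 6-byte RDID (opcode 0x9f, rxlen = 6) encodes
 * rxlen - 1 = 5 in the RD_BYTES field; the first four ID bytes are then
 * fetched from CMDREADDATALOWER and the remaining two from CMDREADDATAUPPER,
 * which is why STIG reads are capped at CQSPI_STIG_DATA_LEN_MAX (8) bytes.
 */
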
int cadence_qspi_apb_command_write_setup(struct cadence_spi_plat *plat,
					 const struct spi_mem_op *op)
{
	int ret;
	unsigned int reg;

	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_STIG_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	reg = cadence_qspi_calc_rdreg(plat);
	writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);

	return 0;
}

/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
int cadence_qspi_apb_command_write(struct cadence_spi_plat *plat,
				   const struct spi_mem_op *op)
{
	unsigned int reg = 0;
	unsigned int wr_data;
	unsigned int wr_len;
	unsigned int txlen = op->data.nbytes;
	const void *txbuf = op->data.buf.out;
	void *reg_base = plat->regbase;
	u32 addr;
	u8 opcode;

	/* Reorder address to SPI bus order if only transferring address */
	if (!txlen) {
		addr = cpu_to_be32(op->addr.val);
		if (op->addr.nbytes == 3)
			addr >>= 8;
		txbuf = &addr;
		txlen = op->addr.nbytes;
	}

	if (txlen > CQSPI_STIG_DATA_LEN_MAX) {
		printf("QSPI: Invalid input arguments txlen %u\n", txlen);
		return -EINVAL;
	}

	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg |= opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (txlen) {
		/* writing data = yes */
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;

		wr_len = txlen > 4 ? 4 : txlen;
		memcpy(&wr_data, txbuf, wr_len);
		writel(wr_data, reg_base +
			CQSPI_REG_CMDWRITEDATALOWER);

		if (txlen > 4) {
			txbuf += wr_len;
			wr_len = txlen - wr_len;
			memcpy(&wr_data, txbuf, wr_len);
			writel(wr_data, reg_base +
				CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	/* Execute the command */
	return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
}

/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
int cadence_qspi_apb_read_setup(struct cadence_spi_plat *plat,
				const struct spi_mem_op *op)
{
	unsigned int reg;
	unsigned int rd_reg;
	unsigned int dummy_clk;
	unsigned int dummy_bytes = op->dummy.nbytes;
	int ret;
	u8 opcode;

	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_READ_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	/* Setup the indirect trigger address */
	writel(plat->trigger_address,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	rd_reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	rd_reg |= cadence_qspi_calc_rdreg(plat);

	writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);

	if (dummy_bytes) {
		/* Convert to clock cycles. */
		dummy_clk = cadence_qspi_calc_dummy(op, plat->dtr);

		if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
			return -ENOTSUPP;

		if (dummy_clk)
			rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
				<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(rd_reg, plat->regbase + CQSPI_REG_RD_INSTR);

	/* set device size */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}

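/*
 * Note on the device size field updated above: only the address-byte count
 * changes, and it is written as op->addr.nbytes - 1, so 3-byte addressing
 * programs 2 and 4-byte addressing programs 3 into the ADDRESS bits of
 * CQSPI_REG_SIZE.
 */
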
static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_plat *plat)
{
	u32 reg = readl(plat->regbase + CQSPI_REG_SDRAMLEVEL);
	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static int cadence_qspi_wait_for_data(struct cadence_spi_plat *plat)
{
	unsigned int timeout = 10000;
	u32 reg;

	while (timeout--) {
		reg = cadence_qspi_get_rd_sram_level(plat);
		if (reg)
			return reg;
		udelay(1);
	}

	return -ETIMEDOUT;
}

static int
cadence_qspi_apb_indirect_read_execute(struct cadence_spi_plat *plat,
				       unsigned int n_rx, u8 *rxbuf)
{
	unsigned int remaining = n_rx;
	unsigned int bytes_to_read = 0;
	int ret;

	writel(n_rx, plat->regbase + CQSPI_REG_INDIRECTRDBYTES);

	/* Start the indirect read transfer */
	writel(CQSPI_REG_INDIRECTRD_START,
	       plat->regbase + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		ret = cadence_qspi_wait_for_data(plat);
		if (ret < 0) {
			printf("Indirect read timed out (%i)\n", ret);
			goto failrd;
		}

		bytes_to_read = ret;

		while (bytes_to_read != 0) {
			bytes_to_read *= plat->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			/*
			 * Handle non-4-byte aligned access to avoid
			 * data abort.
			 */
			if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
				readsb(plat->ahbbase, rxbuf, bytes_to_read);
			else
				readsl(plat->ahbbase, rxbuf,
				       bytes_to_read >> 2);
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cadence_qspi_get_rd_sram_level(plat);
		}
	}

	/* Check indirect done status */
	ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTRD,
				CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE,
	       plat->regbase + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL,
	       plat->regbase + CQSPI_REG_INDIRECTRD);
	return ret;
}

int cadence_qspi_apb_read_execute(struct cadence_spi_plat *plat,
				  const struct spi_mem_op *op)
{
	u64 from = op->addr.val;
	void *buf = op->data.buf.in;
	size_t len = op->data.nbytes;

	if (plat->use_dac_mode && (from + len < plat->ahbsize)) {
		if (len < 256 ||
		    dma_memcpy(buf, plat->ahbbase + from, len) < 0) {
			memcpy_fromio(buf, plat->ahbbase + from, len);
		}
		if (!cadence_qspi_wait_idle(plat->regbase))
			return -EIO;
		return 0;
	}

	return cadence_qspi_apb_indirect_read_execute(plat, len, buf);
}

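/*
 * Read path in short: when direct access (DAC) mode is usable and the range
 * fits below plat->ahbsize, data is simply copied out of the memory-mapped
 * AHB window (via DMA for transfers of 256 bytes or more, falling back to
 * memcpy_fromio() for short transfers or when dma_memcpy() fails).
 * Everything else goes through the indirect, FIFO-based read engine above.
 */
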
/* Opcode + Address (3/4 bytes) */
int cadence_qspi_apb_write_setup(struct cadence_spi_plat *plat,
				 const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_WRITE_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	/* Setup the indirect trigger address */
	writel(plat->trigger_address,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= plat->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= plat->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, plat->regbase + CQSPI_REG_WR_INSTR);

	reg = cadence_qspi_calc_rdreg(plat);
	writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);

	writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);

	if (plat->dtr) {
		/*
		 * Some flashes like the Cypress Semper flash expect a 4-byte
		 * dummy address with the Read SR command in DTR mode, but this
		 * controller does not support sending an address with the Read
		 * SR command. So, disable write completion polling on the
		 * controller's side. spi-nor will take care of polling the
		 * status register.
		 */
		reg = readl(plat->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, plat->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
	}

	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}

static int
cadence_qspi_apb_indirect_write_execute(struct cadence_spi_plat *plat,
					unsigned int n_tx, const u8 *txbuf)
{
	unsigned int page_size = plat->page_size;
	unsigned int remaining = n_tx;
	const u8 *bb_txbuf = txbuf;
	void *bounce_buf = NULL;
	unsigned int write_bytes;
	int ret;

	/*
	 * Use bounce buffer for non 32 bit aligned txbuf to avoid data
	 * aborts
	 */
	if ((uintptr_t)txbuf % 4) {
		bounce_buf = malloc(n_tx);
		if (!bounce_buf)
			return -ENOMEM;
		memcpy(bounce_buf, txbuf, n_tx);
		bb_txbuf = bounce_buf;
	}

	/* Configure the indirect write transfer bytes */
	writel(n_tx, plat->regbase + CQSPI_REG_INDIRECTWRBYTES);

	/* Start the indirect write transfer */
	writel(CQSPI_REG_INDIRECTWR_START,
	       plat->regbase + CQSPI_REG_INDIRECTWR);

	/*
	 * Some delay is required for the above bit to be internally
	 * synchronized by the QSPI module.
	 */
	ndelay(plat->wr_delay);

	while (remaining > 0) {
		write_bytes = remaining > page_size ? page_size : remaining;
		writesl(plat->ahbbase, bb_txbuf, write_bytes >> 2);
		if (write_bytes % 4)
			writesb(plat->ahbbase,
				bb_txbuf + rounddown(write_bytes, 4),
				write_bytes % 4);

		ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_SDRAMLEVEL,
					CQSPI_REG_SDRAMLEVEL_WR_MASK <<
					CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
		if (ret) {
			printf("Indirect write timed out (%i)\n", ret);
			goto failwr;
		}

		bb_txbuf += write_bytes;
		remaining -= write_bytes;
	}

	/* Check indirect done status */
	ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTWR,
				CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE,
	       plat->regbase + CQSPI_REG_INDIRECTWR);
	if (bounce_buf)
		free(bounce_buf);
	return 0;

failwr:
	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL,
	       plat->regbase + CQSPI_REG_INDIRECTWR);
	if (bounce_buf)
		free(bounce_buf);
	return ret;
}

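/*
 * The indirect write loop above pushes at most plat->page_size bytes per
 * iteration, using writesl() for the 4-byte-aligned part and writesb() for
 * any trailing bytes, and then waits for the write SRAM fill level to drain
 * to zero before queueing the next chunk.
 */
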
int cadence_qspi_apb_write_execute(struct cadence_spi_plat *plat,
				   const struct spi_mem_op *op)
{
	u32 to = op->addr.val;
	const void *buf = op->data.buf.out;
	size_t len = op->data.nbytes;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes to
	 * the flash when it is polling the write completion register in DTR
	 * mode. So, we cannot use direct mode when in DTR mode for writing
	 * data.
	 */
	if (!plat->dtr && plat->use_dac_mode && (to + len < plat->ahbsize)) {
		memcpy_toio(plat->ahbbase + to, buf, len);
		if (!cadence_qspi_wait_idle(plat->regbase))
			return -EIO;
		return 0;
	}

	return cadence_qspi_apb_indirect_write_execute(plat, len, buf);
}

void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
{
	unsigned int reg;

	/* enter XiP mode immediately and enable direct mode */
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENABLE;
	reg |= CQSPI_REG_CONFIG_DIRECT;
	reg |= CQSPI_REG_CONFIG_XIP_IMM;
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	/* keep the XiP mode */
	writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);

	/* Enable mode bit at devrd */
	reg = readl(reg_base + CQSPI_REG_RD_INSTR);
	reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);
}