/*
 * Copyright (C) 2012 Altera Corporation <www.altera.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  - Neither the name of the Altera Corporation nor the
 *    names of its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <common.h>
#include <log.h>
#include <asm/io.h>
#include <dma.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <wait_bit.h>
#include <spi.h>
#include <spi-mem.h>
#include <malloc.h>
#include "cadence_qspi.h"

#define CQSPI_REG_POLL_US			1 /* 1us */
#define CQSPI_REG_RETRY				10000
#define CQSPI_POLL_IDLE_RETRY			3

/* Transfer mode */
#define CQSPI_INST_TYPE_SINGLE			0
#define CQSPI_INST_TYPE_DUAL			1
#define CQSPI_INST_TYPE_QUAD			2
#define CQSPI_INST_TYPE_OCTAL			3

#define CQSPI_STIG_DATA_LEN_MAX			8

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_CLKS_MAX			31

/****************************************************************************
 * Controller's configuration and status register (offset from QSPI_BASE)
 ****************************************************************************/
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE			BIT(0)
#define CQSPI_REG_CONFIG_CLK_POL		BIT(1)
#define CQSPI_REG_CONFIG_CLK_PHA		BIT(2)
#define CQSPI_REG_CONFIG_DIRECT			BIT(7)
#define CQSPI_REG_CONFIG_DECODE			BIT(9)
#define CQSPI_REG_CONFIG_XIP_IMM		BIT(18)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_DTR_PROTO		BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE		BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_RD_DATA_CAPTURE		0x10
#define CQSPI_REG_RD_DATA_CAPTURE_BYPASS	BIT(0)
#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB	1
#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL		0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL		BIT(14)

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL		BIT(1)
#define CQSPI_REG_INDIRECTRD_INPROGRESS		BIT(2)
#define CQSPI_REG_INDIRECTRD_DONE		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS		BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_OPCODE_MASK		0xFF

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL		BIT(1)
#define CQSPI_REG_INDIRECTWR_INPROGRESS		BIT(2)
#define CQSPI_REG_INDIRECTWR_DONE		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_OP_EXT_LOWER			0xE0
#define CQSPI_REG_OP_EXT_READ_LSB		24
#define CQSPI_REG_OP_EXT_WRITE_LSB		16
#define CQSPI_REG_OP_EXT_STIG_LSB		0

#define CQSPI_REG_IS_IDLE(base)					\
	((readl(base + CQSPI_REG_CONFIG) >>			\
		CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)

#define CQSPI_GET_RD_SRAM_LEVEL(reg_base)			\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>		\
	CQSPI_REG_SDRAMLEVEL_RD_LSB) & CQSPI_REG_SDRAMLEVEL_RD_MASK)

#define CQSPI_GET_WR_SRAM_LEVEL(reg_base)			\
	(((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >>		\
	CQSPI_REG_SDRAMLEVEL_WR_LSB) & CQSPI_REG_SDRAMLEVEL_WR_MASK)

void cadence_qspi_apb_controller_enable(void *reg_base)
{
	unsigned int reg;
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENABLE;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

void cadence_qspi_apb_controller_disable(void *reg_base)
{
	unsigned int reg;
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_ENABLE;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

void cadence_qspi_apb_dac_mode_enable(void *reg_base)
{
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DIRECT;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static unsigned int cadence_qspi_calc_dummy(const struct spi_mem_op *op,
					    bool dtr)
{
	unsigned int dummy_clk;

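	/*
	 * Illustrative note on the calculation below: op->dummy.nbytes bytes
	 * are clocked out over op->dummy.buswidth lines, so each byte takes
	 * 8 / buswidth clocks (e.g. 3 dummy bytes on a single line = 24
	 * clocks). In DTR mode two bits transfer per clock, halving the count.
	 */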
	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (dtr)
		dummy_clk /= 2;

	return dummy_clk;
}

static u32 cadence_qspi_calc_rdreg(struct cadence_spi_plat *plat)
{
	u32 rdreg = 0;

	rdreg |= plat->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= plat->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= plat->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}

static int cadence_qspi_buswidth_to_inst_type(u8 buswidth)
{
	switch (buswidth) {
	case 0:
	case 1:
		return CQSPI_INST_TYPE_SINGLE;

	case 2:
		return CQSPI_INST_TYPE_DUAL;

	case 4:
		return CQSPI_INST_TYPE_QUAD;

	case 8:
		return CQSPI_INST_TYPE_OCTAL;

	default:
		return -ENOTSUPP;
	}
}

static int cadence_qspi_set_protocol(struct cadence_spi_plat *plat,
				     const struct spi_mem_op *op)
{
	int ret;

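	/*
	 * The op is handled as DTR only when the command, address and data
	 * phases all request DTR; mixed SDR/DTR phases fall back to SDR here.
	 */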
	plat->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;

	ret = cadence_qspi_buswidth_to_inst_type(op->cmd.buswidth);
	if (ret < 0)
		return ret;
	plat->inst_width = ret;

	ret = cadence_qspi_buswidth_to_inst_type(op->addr.buswidth);
	if (ret < 0)
		return ret;
	plat->addr_width = ret;

	ret = cadence_qspi_buswidth_to_inst_type(op->data.buswidth);
	if (ret < 0)
		return ret;
	plat->data_width = ret;

	return 0;
}

/* Return 1 if idle, otherwise return 0 (busy). */
static unsigned int cadence_qspi_wait_idle(void *reg_base)
{
	unsigned int start, count = 0;
	/* timeout in unit of ms */
	unsigned int timeout = 5000;

	start = get_timer(0);
	for ( ; get_timer(start) < timeout ; ) {
		if (CQSPI_REG_IS_IDLE(reg_base))
			count++;
		else
			count = 0;
		/*
		 * Ensure the QSPI controller is in true idle state after
		 * reading back the same idle status consecutively
		 */
		if (count >= CQSPI_POLL_IDLE_RETRY)
			return 1;
	}

	/* Timeout, still in busy mode. */
	printf("QSPI: QSPI is still busy after %u ms timeout.\n", timeout);
	return 0;
}

void cadence_qspi_apb_readdata_capture(void *reg_base,
				unsigned int bypass, unsigned int delay)
{
	unsigned int reg;
	cadence_qspi_apb_controller_disable(reg_base);

	reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);

	if (bypass)
		reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
	else
		reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;

	reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
		<< CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
		<< CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_config_baudrate_div(void *reg_base,
	unsigned int ref_clk_hz, unsigned int sclk_hz)
{
	unsigned int reg;
	unsigned int div;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);

	/*
	 * The baud_div field in the config reg is 4 bits, and the ref clock is
	 * divided by 2 * (baud_div + 1). Round up the divider to ensure the
	 * SPI clock rate is less than or equal to the requested clock rate.
	 */
	div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;
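	/*
	 * Worked example (illustrative numbers): ref_clk_hz = 384 MHz and
	 * sclk_hz = 25 MHz give div = DIV_ROUND_UP(384000000, 50000000) - 1
	 * = 7, i.e. an actual SCLK of 384 MHz / (2 * 8) = 24 MHz, just below
	 * the requested rate.
	 */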
	/* ensure the baud rate doesn't exceed the max value */
	if (div > CQSPI_REG_CONFIG_BAUD_MASK)
		div = CQSPI_REG_CONFIG_BAUD_MASK;

	debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
	      ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));

	reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);

	if (mode & SPI_CPOL)
		reg |= CQSPI_REG_CONFIG_CLK_POL;
	if (mode & SPI_CPHA)
		reg |= CQSPI_REG_CONFIG_CLK_PHA;

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_chipselect(void *reg_base,
	unsigned int chip_select, unsigned int decoder_enable)
{
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	debug("%s : chipselect %d decode %d\n", __func__, chip_select,
	      decoder_enable);

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	/* decoder */
	if (decoder_enable) {
		reg |= CQSPI_REG_CONFIG_DECODE;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE;
		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
			<< CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
			<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_delay(void *reg_base,
	unsigned int ref_clk, unsigned int sclk_hz,
	unsigned int tshsl_ns, unsigned int tsd2d_ns,
	unsigned int tchsh_ns, unsigned int tslch_ns)
{
	unsigned int ref_clk_ns;
	unsigned int sclk_ns;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;

	cadence_qspi_apb_controller_disable(reg_base);

	/* Convert to ns. */
	ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);

	/* Convert to ns. */
	sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);

	/* The controller adds additional delay to that programmed in the reg */
	if (tshsl_ns >= sclk_ns + ref_clk_ns)
		tshsl_ns -= sclk_ns + ref_clk_ns;
	if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
		tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
	tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
	tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
	tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
	tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);
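	/*
	 * Illustrative example: with ref_clk = 500 MHz (ref_clk_ns = 2) and
	 * sclk = 50 MHz (sclk_ns = 20), a requested tshsl_ns of 50 is first
	 * reduced to 50 - (20 + 2) = 28 ns and then rounded up to 14
	 * reference clock cycles before being programmed below.
	 */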
	reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
			<< CQSPI_REG_DELAY_TSHSL_LSB);
	reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
			<< CQSPI_REG_DELAY_TCHSH_LSB);
	reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
			<< CQSPI_REG_DELAY_TSLCH_LSB);
	reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
			<< CQSPI_REG_DELAY_TSD2D_LSB);
	writel(reg, reg_base + CQSPI_REG_DELAY);

	cadence_qspi_apb_controller_enable(reg_base);
}

void cadence_qspi_apb_controller_init(struct cadence_spi_plat *plat)
{
	unsigned reg;

	cadence_qspi_apb_controller_disable(plat->regbase);

	/* Configure the device size and address bytes */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	/* Clear the previous value */
	reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
	reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
	reg |= (plat->page_size << CQSPI_REG_SIZE_PAGE_LSB);
	reg |= (plat->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);

	/* Configure the remap address register, no remap */
	writel(0, plat->regbase + CQSPI_REG_REMAP);

	/* Indirect mode configurations */
	writel(plat->fifo_depth / 2, plat->regbase + CQSPI_REG_SRAMPARTITION);

	/* Disable all interrupts */
	writel(0, plat->regbase + CQSPI_REG_IRQMASK);

	cadence_qspi_apb_controller_enable(plat->regbase);
}

static int cadence_qspi_apb_exec_flash_cmd(void *reg_base,
					   unsigned int reg)
{
	unsigned int retry = CQSPI_REG_RETRY;

	/* Write the CMDCTRL without start execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execute */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	while (retry--) {
		reg = readl(reg_base + CQSPI_REG_CMDCTRL);
		if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
			break;
		udelay(1);
	}

	if (!retry) {
		printf("QSPI: flash command execution timeout\n");
		return -EIO;
	}

	/* Polling QSPI idle status. */
	if (!cadence_qspi_wait_idle(reg_base))
		return -EIO;

	return 0;
}

static int cadence_qspi_setup_opcode_ext(struct cadence_spi_plat *plat,
					 const struct spi_mem_op *op,
					 unsigned int shift)
{
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/* Opcode extension is the LSB. */
	ext = op->cmd.opcode & 0xff;
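	/*
	 * In other words, the 2-byte op->cmd.opcode carries the command byte
	 * in bits 15:8 and the extension in bits 7:0; the callers extract the
	 * command byte with "op->cmd.opcode >> 8". For example (hypothetical
	 * values), a command 0xED with extension 0x11 arrives here as 0xED11.
	 */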
	reg = readl(plat->regbase + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, plat->regbase + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}

static int cadence_qspi_enable_dtr(struct cadence_spi_plat *plat,
				   const struct spi_mem_op *op,
				   unsigned int shift,
				   bool enable)
{
	unsigned int reg;
	int ret;

	reg = readl(plat->regbase + CQSPI_REG_CONFIG);

	if (enable) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up command opcode extension. */
		ret = cadence_qspi_setup_opcode_ext(plat, op, shift);
		if (ret)
			return ret;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
		reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
	}

	writel(reg, plat->regbase + CQSPI_REG_CONFIG);

	return 0;
}

int cadence_qspi_apb_command_read_setup(struct cadence_spi_plat *plat,
					const struct spi_mem_op *op)
{
	int ret;
	unsigned int reg;

	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_STIG_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	reg = cadence_qspi_calc_rdreg(plat);
	writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);

	return 0;
}

/* For command RDID, RDSR. */
int cadence_qspi_apb_command_read(struct cadence_spi_plat *plat,
				  const struct spi_mem_op *op)
{
	void *reg_base = plat->regbase;
	unsigned int reg;
	unsigned int read_len;
	int status;
	unsigned int rxlen = op->data.nbytes;
	void *rxbuf = op->data.buf.in;
	unsigned int dummy_clk;
	u8 opcode;

	if (rxlen > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		printf("QSPI: Invalid input arguments rxlen %u\n", rxlen);
		return -EINVAL;
	}

	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	/* Set up dummy cycles. */
	dummy_clk = cadence_qspi_calc_dummy(op, plat->dtr);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -ENOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
			<< CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
	if (status != 0)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (rxlen > 4) ? 4 : rxlen;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (rxlen > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = rxlen - read_len;
		memcpy(rxbuf, &reg, read_len);
	}
	return 0;
}

int cadence_qspi_apb_command_write_setup(struct cadence_spi_plat *plat,
					 const struct spi_mem_op *op)
{
	int ret;
	unsigned int reg;

	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_STIG_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	reg = cadence_qspi_calc_rdreg(plat);
	writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);

	return 0;
}

/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
int cadence_qspi_apb_command_write(struct cadence_spi_plat *plat,
				   const struct spi_mem_op *op)
{
	unsigned int reg = 0;
	unsigned int wr_data;
	unsigned int wr_len;
	unsigned int txlen = op->data.nbytes;
	const void *txbuf = op->data.buf.out;
	void *reg_base = plat->regbase;
	u32 addr;
	u8 opcode;

	/* Reorder address to SPI bus order if only transferring address */
	if (!txlen) {
		addr = cpu_to_be32(op->addr.val);
		if (op->addr.nbytes == 3)
			addr >>= 8;
		txbuf = &addr;
		txlen = op->addr.nbytes;
	}

	if (txlen > CQSPI_STIG_DATA_LEN_MAX) {
		printf("QSPI: Invalid input arguments txlen %u\n", txlen);
		return -EINVAL;
	}

	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg |= opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (txlen) {
		/* writing data = yes */
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;

		wr_len = txlen > 4 ? 4 : txlen;
		memcpy(&wr_data, txbuf, wr_len);
		writel(wr_data, reg_base +
			CQSPI_REG_CMDWRITEDATALOWER);

		if (txlen > 4) {
			txbuf += wr_len;
			wr_len = txlen - wr_len;
			memcpy(&wr_data, txbuf, wr_len);
			writel(wr_data, reg_base +
				CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	/* Execute the command */
	return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
}

/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
int cadence_qspi_apb_read_setup(struct cadence_spi_plat *plat,
				const struct spi_mem_op *op)
{
	unsigned int reg;
	unsigned int rd_reg;
	unsigned int dummy_clk;
	unsigned int dummy_bytes = op->dummy.nbytes;
	int ret;
	u8 opcode;

	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_READ_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	/* Setup the indirect trigger address */
	writel(plat->trigger_address,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	rd_reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	rd_reg |= cadence_qspi_calc_rdreg(plat);

	writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);

	if (dummy_bytes) {
		/* Convert to clock cycles. */
		dummy_clk = cadence_qspi_calc_dummy(op, plat->dtr);

		if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
			return -ENOTSUPP;

		if (dummy_clk)
			rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
				<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(rd_reg, plat->regbase + CQSPI_REG_RD_INSTR);

	/* set device size */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}

static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_plat *plat)
{
	u32 reg = readl(plat->regbase + CQSPI_REG_SDRAMLEVEL);
	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static int cadence_qspi_wait_for_data(struct cadence_spi_plat *plat)
{
	unsigned int timeout = 10000;
	u32 reg;

	while (timeout--) {
		reg = cadence_qspi_get_rd_sram_level(plat);
		if (reg)
			return reg;
		udelay(1);
	}

	return -ETIMEDOUT;
}

static int
cadence_qspi_apb_indirect_read_execute(struct cadence_spi_plat *plat,
				       unsigned int n_rx, u8 *rxbuf)
{
	unsigned int remaining = n_rx;
	unsigned int bytes_to_read = 0;
	int ret;

	writel(n_rx, plat->regbase + CQSPI_REG_INDIRECTRDBYTES);

	/* Start the indirect read transfer */
	writel(CQSPI_REG_INDIRECTRD_START,
	       plat->regbase + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		ret = cadence_qspi_wait_for_data(plat);
		if (ret < 0) {
			printf("Indirect read timed out (%i)\n", ret);
			goto failrd;
		}

		bytes_to_read = ret;

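		/*
		 * The read SRAM level is reported in FIFO words; the loop
		 * below converts it to bytes using the configured FIFO width
		 * and clamps it to the bytes still outstanding.
		 */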
		while (bytes_to_read != 0) {
			bytes_to_read *= plat->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			/*
			 * Handle non-4-byte aligned access to avoid
			 * data abort.
			 */
			if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
				readsb(plat->ahbbase, rxbuf, bytes_to_read);
			else
				readsl(plat->ahbbase, rxbuf,
				       bytes_to_read >> 2);
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cadence_qspi_get_rd_sram_level(plat);
		}
	}

	/* Check indirect done status */
	ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTRD,
				CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE,
	       plat->regbase + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL,
	       plat->regbase + CQSPI_REG_INDIRECTRD);
	return ret;
}

int cadence_qspi_apb_read_execute(struct cadence_spi_plat *plat,
				  const struct spi_mem_op *op)
{
	u64 from = op->addr.val;
	void *buf = op->data.buf.in;
	size_t len = op->data.nbytes;

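	/*
	 * Prefer the memory-mapped (direct access) window when the whole
	 * range fits inside it: DMA is attempted for transfers of 256 bytes
	 * or more, with a fallback to a CPU copy if DMA is unavailable or
	 * fails. Everything else goes through the indirect (FIFO based)
	 * engine.
	 */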
	if (plat->use_dac_mode && (from + len < plat->ahbsize)) {
		if (len < 256 ||
		    dma_memcpy(buf, plat->ahbbase + from, len) < 0) {
			memcpy_fromio(buf, plat->ahbbase + from, len);
		}
		if (!cadence_qspi_wait_idle(plat->regbase))
			return -EIO;
		return 0;
	}

	return cadence_qspi_apb_indirect_read_execute(plat, len, buf);
}

/* Opcode + Address (3/4 bytes) */
int cadence_qspi_apb_write_setup(struct cadence_spi_plat *plat,
				 const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_WRITE_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	/* Setup the indirect trigger address */
	writel(plat->trigger_address,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= plat->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= plat->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, plat->regbase + CQSPI_REG_WR_INSTR);

	reg = cadence_qspi_calc_rdreg(plat);
	writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);

	writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);

	if (plat->dtr) {
		/*
		 * Some flashes like the Cypress Semper flash expect a 4-byte
		 * dummy address with the Read SR command in DTR mode, but this
		 * controller does not support sending address with the Read SR
		 * command. So, disable write completion polling on the
		 * controller's side. spi-nor will take care of polling the
		 * status register.
		 */
		reg = readl(plat->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, plat->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
	}

	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}

static int
cadence_qspi_apb_indirect_write_execute(struct cadence_spi_plat *plat,
					unsigned int n_tx, const u8 *txbuf)
{
	unsigned int page_size = plat->page_size;
	unsigned int remaining = n_tx;
	const u8 *bb_txbuf = txbuf;
	void *bounce_buf = NULL;
	unsigned int write_bytes;
	int ret;

	/*
	 * Use bounce buffer for non 32 bit aligned txbuf to avoid data
	 * aborts
	 */
	if ((uintptr_t)txbuf % 4) {
		bounce_buf = malloc(n_tx);
		if (!bounce_buf)
			return -ENOMEM;
		memcpy(bounce_buf, txbuf, n_tx);
		bb_txbuf = bounce_buf;
	}

	/* Configure the indirect write transfer bytes */
	writel(n_tx, plat->regbase + CQSPI_REG_INDIRECTWRBYTES);

	/* Start the indirect write transfer */
	writel(CQSPI_REG_INDIRECTWR_START,
	       plat->regbase + CQSPI_REG_INDIRECTWR);

	/*
	 * Some delay is required for the above bit to be internally
	 * synchronized by the QSPI module.
	 */
	ndelay(plat->wr_delay);

	while (remaining > 0) {
		write_bytes = remaining > page_size ? page_size : remaining;
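		/*
		 * Push whole 32-bit words into the AHB window first; any
		 * remaining tail bytes (when write_bytes is not a multiple of
		 * 4) are written byte by byte below.
		 */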
		writesl(plat->ahbbase, bb_txbuf, write_bytes >> 2);
		if (write_bytes % 4)
			writesb(plat->ahbbase,
				bb_txbuf + rounddown(write_bytes, 4),
				write_bytes % 4);

		ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_SDRAMLEVEL,
					CQSPI_REG_SDRAMLEVEL_WR_MASK <<
					CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
		if (ret) {
			printf("Indirect write timed out (%i)\n", ret);
			goto failwr;
		}

		bb_txbuf += write_bytes;
		remaining -= write_bytes;
	}

	/* Check indirect done status */
	ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTWR,
				CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE,
	       plat->regbase + CQSPI_REG_INDIRECTWR);
	if (bounce_buf)
		free(bounce_buf);
	return 0;

failwr:
	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL,
	       plat->regbase + CQSPI_REG_INDIRECTWR);
	if (bounce_buf)
		free(bounce_buf);
	return ret;
}

int cadence_qspi_apb_write_execute(struct cadence_spi_plat *plat,
				   const struct spi_mem_op *op)
{
	u32 to = op->addr.val;
	const void *buf = op->data.buf.out;
	size_t len = op->data.nbytes;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes to
	 * the flash when it is polling the write completion register in DTR
	 * mode. So, we can not use direct mode when in DTR mode for writing
	 * data.
	 */
	if (!plat->dtr && plat->use_dac_mode && (to + len < plat->ahbsize)) {
		memcpy_toio(plat->ahbbase + to, buf, len);
		if (!cadence_qspi_wait_idle(plat->regbase))
			return -EIO;
		return 0;
	}

	return cadence_qspi_apb_indirect_write_execute(plat, len, buf);
}

void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
{
	unsigned int reg;

	/* enter XiP mode immediately and enable direct mode */
	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_ENABLE;
	reg |= CQSPI_REG_CONFIG_DIRECT;
	reg |= CQSPI_REG_CONFIG_XIP_IMM;
	writel(reg, reg_base + CQSPI_REG_CONFIG);

	/* keep the XiP mode */
	writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);

	/* Enable mode bit at devrd */
	reg = readl(reg_base + CQSPI_REG_RD_INSTR);
	reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);
}