blob: e9f0b343abb9f7c15d45a9b7d158def7342313ab [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +01002/*
3 * Copyright (C) 2017 Álvaro Fernández Rojas <noltari@gmail.com>
4 *
5 * Derived from linux/drivers/spi/spi-bcm63xx-hsspi.c:
6 * Copyright (C) 2000-2010 Broadcom Corporation
7 * Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org>
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +01008 */
9
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +010010#include <clk.h>
11#include <dm.h>
Simon Glass0f2af882020-05-10 11:40:05 -060012#include <log.h>
Simon Glass9bc15642020-02-03 07:36:16 -070013#include <malloc.h>
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +010014#include <spi.h>
15#include <reset.h>
16#include <wait_bit.h>
17#include <asm/io.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060018#include <linux/bitops.h>
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +010019
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +010020#define HSSPI_PP 0
21
William Zhang9a0ff5d2023-06-07 16:37:04 -070022/*
23 * The maximum frequency for SPI synchronous mode is 30MHz for some chips and
24 * 25MHz for some others. This depends on the chip layout and SPI signals
25 * distance to the pad. We use the lower of these values to cover all relevant
26 * chips.
27 */
28#define SPI_MAX_SYNC_CLOCK 25000000
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +010029
30/* SPI Control register */
31#define SPI_CTL_REG 0x000
32#define SPI_CTL_CS_POL_SHIFT 0
33#define SPI_CTL_CS_POL_MASK (0xff << SPI_CTL_CS_POL_SHIFT)
34#define SPI_CTL_CLK_GATE_SHIFT 16
35#define SPI_CTL_CLK_GATE_MASK (1 << SPI_CTL_CLK_GATE_SHIFT)
36#define SPI_CTL_CLK_POL_SHIFT 17
37#define SPI_CTL_CLK_POL_MASK (1 << SPI_CTL_CLK_POL_SHIFT)
38
39/* SPI Interrupts registers */
40#define SPI_IR_STAT_REG 0x008
41#define SPI_IR_ST_MASK_REG 0x00c
42#define SPI_IR_MASK_REG 0x010
43
44#define SPI_IR_CLEAR_ALL 0xff001f1f
45
46/* SPI Ping-Pong Command registers */
47#define SPI_CMD_REG (0x080 + (0x40 * (HSSPI_PP)) + 0x00)
48#define SPI_CMD_OP_SHIFT 0
49#define SPI_CMD_OP_START (0x1 << SPI_CMD_OP_SHIFT)
50#define SPI_CMD_PFL_SHIFT 8
51#define SPI_CMD_PFL_MASK (0x7 << SPI_CMD_PFL_SHIFT)
52#define SPI_CMD_SLAVE_SHIFT 12
53#define SPI_CMD_SLAVE_MASK (0x7 << SPI_CMD_SLAVE_SHIFT)
54
55/* SPI Ping-Pong Status registers */
56#define SPI_STAT_REG (0x080 + (0x40 * (HSSPI_PP)) + 0x04)
57#define SPI_STAT_SRCBUSY_SHIFT 1
58#define SPI_STAT_SRCBUSY_MASK (1 << SPI_STAT_SRCBUSY_SHIFT)
59
60/* SPI Profile Clock registers */
61#define SPI_PFL_CLK_REG(x) (0x100 + (0x20 * (x)) + 0x00)
62#define SPI_PFL_CLK_FREQ_SHIFT 0
63#define SPI_PFL_CLK_FREQ_MASK (0x3fff << SPI_PFL_CLK_FREQ_SHIFT)
64#define SPI_PFL_CLK_RSTLOOP_SHIFT 15
65#define SPI_PFL_CLK_RSTLOOP_MASK (1 << SPI_PFL_CLK_RSTLOOP_SHIFT)
66
67/* SPI Profile Signal registers */
68#define SPI_PFL_SIG_REG(x) (0x100 + (0x20 * (x)) + 0x04)
69#define SPI_PFL_SIG_LATCHRIS_SHIFT 12
70#define SPI_PFL_SIG_LATCHRIS_MASK (1 << SPI_PFL_SIG_LATCHRIS_SHIFT)
71#define SPI_PFL_SIG_LAUNCHRIS_SHIFT 13
72#define SPI_PFL_SIG_LAUNCHRIS_MASK (1 << SPI_PFL_SIG_LAUNCHRIS_SHIFT)
73#define SPI_PFL_SIG_ASYNCIN_SHIFT 16
74#define SPI_PFL_SIG_ASYNCIN_MASK (1 << SPI_PFL_SIG_ASYNCIN_SHIFT)
75
76/* SPI Profile Mode registers */
77#define SPI_PFL_MODE_REG(x) (0x100 + (0x20 * (x)) + 0x08)
78#define SPI_PFL_MODE_FILL_SHIFT 0
79#define SPI_PFL_MODE_FILL_MASK (0xff << SPI_PFL_MODE_FILL_SHIFT)
William Zhang9a0ff5d2023-06-07 16:37:04 -070080#define SPI_PFL_MODE_MDRDST_SHIFT 8
81#define SPI_PFL_MODE_MDWRST_SHIFT 12
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +010082#define SPI_PFL_MODE_MDRDSZ_SHIFT 16
83#define SPI_PFL_MODE_MDRDSZ_MASK (1 << SPI_PFL_MODE_MDRDSZ_SHIFT)
84#define SPI_PFL_MODE_MDWRSZ_SHIFT 18
85#define SPI_PFL_MODE_MDWRSZ_MASK (1 << SPI_PFL_MODE_MDWRSZ_SHIFT)
86#define SPI_PFL_MODE_3WIRE_SHIFT 20
87#define SPI_PFL_MODE_3WIRE_MASK (1 << SPI_PFL_MODE_3WIRE_SHIFT)
#define SPI_PFL_MODE_PREPCNT_SHIFT	24
/*
 * The prepend count field holds up to HSSPI_MAX_PREPEND_SIZE (15) bytes,
 * i.e. a 4-bit field; the mask must cover all four bits (0xf), not the
 * value 4 as previously defined.
 */
#define SPI_PFL_MODE_PREPCNT_MASK	(0xf << SPI_PFL_MODE_PREPCNT_SHIFT)
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +010090
91/* SPI Ping-Pong FIFO registers */
92#define HSSPI_FIFO_SIZE 0x200
93#define HSSPI_FIFO_BASE (0x200 + \
94 (HSSPI_FIFO_SIZE * HSSPI_PP))
95
96/* SPI Ping-Pong FIFO OP register */
97#define HSSPI_FIFO_OP_SIZE 0x2
98#define HSSPI_FIFO_OP_REG (HSSPI_FIFO_BASE + 0x00)
99#define HSSPI_FIFO_OP_BYTES_SHIFT 0
100#define HSSPI_FIFO_OP_BYTES_MASK (0x3ff << HSSPI_FIFO_OP_BYTES_SHIFT)
101#define HSSPI_FIFO_OP_MBIT_SHIFT 11
102#define HSSPI_FIFO_OP_MBIT_MASK (1 << HSSPI_FIFO_OP_MBIT_SHIFT)
103#define HSSPI_FIFO_OP_CODE_SHIFT 13
104#define HSSPI_FIFO_OP_READ_WRITE (1 << HSSPI_FIFO_OP_CODE_SHIFT)
105#define HSSPI_FIFO_OP_CODE_W (2 << HSSPI_FIFO_OP_CODE_SHIFT)
106#define HSSPI_FIFO_OP_CODE_R (3 << HSSPI_FIFO_OP_CODE_SHIFT)
107
William Zhang9a0ff5d2023-06-07 16:37:04 -0700108#define HSSPI_MAX_DATA_SIZE (HSSPI_FIFO_SIZE - HSSPI_FIFO_OP_SIZE)
109#define HSSPI_MAX_PREPEND_SIZE 15
110
111#define HSSPI_XFER_MODE_PREPEND 0
112#define HSSPI_XFER_MODE_DUMMYCS 1
113
/* Per-controller private data for the BCM63xx HSSPI driver */
struct bcm63xx_hsspi_priv {
	void __iomem *regs;	/* mapped controller register base */
	ulong clk_rate;		/* "pll" input clock rate, Hz (from clk_get_rate) */
	uint8_t num_cs;		/* number of chip selects ("num-cs" DT prop, default 8) */
	uint8_t cs_pols;	/* per-CS polarity bitmap; bit set = active high */
	uint speed;		/* last speed requested via set_speed(), Hz */
	uint xfer_mode;		/* HSSPI_XFER_MODE_PREPEND or HSSPI_XFER_MODE_DUMMYCS */
	uint32_t prepend_cnt;	/* bytes currently buffered in prepend_buf */
	uint8_t prepend_buf[HSSPI_MAX_PREPEND_SIZE];	/* buffered half-duplex writes */
};
124
125static int bcm63xx_hsspi_cs_info(struct udevice *bus, uint cs,
126 struct spi_cs_info *info)
127{
128 struct bcm63xx_hsspi_priv *priv = dev_get_priv(bus);
129
130 if (cs >= priv->num_cs) {
131 printf("no cs %u\n", cs);
Bin Mengf8586f62019-09-09 06:00:01 -0700132 return -EINVAL;
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100133 }
134
135 return 0;
136}
137
138static int bcm63xx_hsspi_set_mode(struct udevice *bus, uint mode)
139{
140 struct bcm63xx_hsspi_priv *priv = dev_get_priv(bus);
141
142 /* clock polarity */
143 if (mode & SPI_CPOL)
Kursad Oney711c7302019-08-14 15:18:34 +0200144 setbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CLK_POL_MASK);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100145 else
Kursad Oney711c7302019-08-14 15:18:34 +0200146 clrbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CLK_POL_MASK);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100147
148 return 0;
149}
150
151static int bcm63xx_hsspi_set_speed(struct udevice *bus, uint speed)
152{
153 struct bcm63xx_hsspi_priv *priv = dev_get_priv(bus);
154
155 priv->speed = speed;
156
157 return 0;
158}
159
/*
 * Program the clock, signal and CS-polarity setup for a transfer on the
 * slave's profile, and make the desired chip select active.
 *
 * In dummy-cs mode the CS-polarity trick (see the comment block above
 * bcm63xx_hsspi_xfer_dummy_cs) only works at synchronous clock rates, so
 * the speed is clamped to SPI_MAX_SYNC_CLOCK first.
 */
static void bcm63xx_hsspi_activate_cs(struct bcm63xx_hsspi_priv *priv,
				      struct dm_spi_slave_plat *plat)
{
	uint32_t clr, set;
	uint speed = priv->speed;

	if (priv->xfer_mode == HSSPI_XFER_MODE_DUMMYCS &&
	    speed > SPI_MAX_SYNC_CLOCK) {
		speed = SPI_MAX_SYNC_CLOCK;
		debug("Force to dummy cs mode. Reduce the speed to %dHz\n", speed);
	}

	/* profile clock: divider is 2048 / ceil(clk_rate / speed) */
	set = DIV_ROUND_UP(priv->clk_rate, speed);
	set = DIV_ROUND_UP(2048, set);
	set &= SPI_PFL_CLK_FREQ_MASK;
	set |= SPI_PFL_CLK_RSTLOOP_MASK;
	writel(set, priv->regs + SPI_PFL_CLK_REG(plat->cs[0]));

	/* profile signal */
	set = 0;
	clr = SPI_PFL_SIG_LAUNCHRIS_MASK |
	      SPI_PFL_SIG_LATCHRIS_MASK |
	      SPI_PFL_SIG_ASYNCIN_MASK;

	/* latch/launch config: CPHA launches on rising, else latches on rising */
	if (plat->mode & SPI_CPHA)
		set |= SPI_PFL_SIG_LAUNCHRIS_MASK;
	else
		set |= SPI_PFL_SIG_LATCHRIS_MASK;

	/* async input clocking needed above the synchronous limit */
	if (speed > SPI_MAX_SYNC_CLOCK)
		set |= SPI_PFL_SIG_ASYNCIN_MASK;

	clrsetbits_32(priv->regs + SPI_PFL_SIG_REG(plat->cs[0]), clr, set);

	/* global control */
	set = 0;
	clr = 0;

	if (priv->xfer_mode == HSSPI_XFER_MODE_PREPEND) {
		/* prepend mode: just apply the slave's own CS polarity */
		if (priv->cs_pols & BIT(plat->cs[0]))
			set |= BIT(plat->cs[0]);
		else
			clr |= BIT(plat->cs[0]);
	} else {
		/*
		 * dummy-cs mode: invert the desired CS so it stays asserted
		 * between transfers, and invert a dummy CS (CS "!cs", i.e.
		 * 1 for CS0, 0 otherwise) which the hardware will toggle
		 * instead - see the driver-level comment block.
		 */
		/* invert cs polarity */
		if (priv->cs_pols & BIT(plat->cs[0]))
			clr |= BIT(plat->cs[0]);
		else
			set |= BIT(plat->cs[0]);

		/* invert dummy cs polarity */
		if (priv->cs_pols & BIT(!plat->cs[0]))
			clr |= BIT(!plat->cs[0]);
		else
			set |= BIT(!plat->cs[0]);
	}

	clrsetbits_32(priv->regs + SPI_CTL_REG, clr, set);
}
222
/*
 * Undo activate_cs: restore every chip select to the polarity recorded
 * in priv->cs_pols, which releases the (possibly inverted) active CS.
 */
static void bcm63xx_hsspi_deactivate_cs(struct bcm63xx_hsspi_priv *priv)
{
	/* restore cs polarities */
	clrsetbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CS_POL_MASK,
		      priv->cs_pols);
}
229
230/*
231 * BCM63xx HSSPI driver doesn't allow keeping CS active between transfers
232 * because they are controlled by HW.
233 * However, it provides a mechanism to prepend write transfers prior to read
234 * transfers (with a maximum prepend of 15 bytes), which is usually enough for
235 * SPI-connected flashes since reading requires prepending a write transfer of
236 * 5 bytes. On the other hand it also provides a way to invert each CS
237 * polarity, not only between transfers like the older BCM63xx SPI driver, but
238 * also the rest of the time.
239 *
240 * Instead of using the prepend mechanism, this implementation inverts the
241 * polarity of both the desired CS and another dummy CS when the bus is
242 * claimed. This way, the dummy CS is restored to its inactive value when
243 * transfers are issued and the desired CS is preserved in its active value
244 * all the time. This hack is also used in the upstream linux driver and
Pengfei Fan746271d2022-12-09 09:39:50 +0800245 * allows keeping CS active between transfers even if the HW doesn't give
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100246 * this possibility.
William Zhang9a0ff5d2023-06-07 16:37:04 -0700247 *
248 * This workaround only works when the dummy CS (usually CS1 when the actual
249 * CS is 0) pinmuxed to SPI chip select function if SPI clock is faster than
250 * SPI_MAX_SYNC_CLOCK. In old broadcom chip, CS1 pin is default to chip select
251 * function. But this is not the case for new chips. To make this function
252 * always work, it should be called with maximum clock of SPI_MAX_SYNC_CLOCK.
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100253 */
William Zhang9a0ff5d2023-06-07 16:37:04 -0700254static int bcm63xx_hsspi_xfer_dummy_cs(struct udevice *dev, unsigned int data_bytes,
255 const void *dout, void *din, unsigned long flags)
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100256{
257 struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
Simon Glassb75b15b2020-12-03 16:55:23 -0700258 struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100259 size_t step_size = HSSPI_FIFO_SIZE;
260 uint16_t opcode = 0;
William Zhang76333332023-06-07 16:37:02 -0700261 uint32_t val = SPI_PFL_MODE_FILL_MASK;
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100262 const uint8_t *tx = dout;
263 uint8_t *rx = din;
264
265 if (flags & SPI_XFER_BEGIN)
266 bcm63xx_hsspi_activate_cs(priv, plat);
267
268 /* fifo operation */
269 if (tx && rx)
270 opcode = HSSPI_FIFO_OP_READ_WRITE;
271 else if (rx)
272 opcode = HSSPI_FIFO_OP_CODE_R;
273 else if (tx)
274 opcode = HSSPI_FIFO_OP_CODE_W;
275
276 if (opcode != HSSPI_FIFO_OP_CODE_R)
277 step_size -= HSSPI_FIFO_OP_SIZE;
278
279 /* dual mode */
William Zhang76333332023-06-07 16:37:02 -0700280 if ((opcode == HSSPI_FIFO_OP_CODE_R && (plat->mode & SPI_RX_DUAL)) ||
281 (opcode == HSSPI_FIFO_OP_CODE_W && (plat->mode & SPI_TX_DUAL))) {
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100282 opcode |= HSSPI_FIFO_OP_MBIT_MASK;
283
William Zhang76333332023-06-07 16:37:02 -0700284 /* profile mode */
285 if (plat->mode & SPI_RX_DUAL)
286 val |= SPI_PFL_MODE_MDRDSZ_MASK;
287 if (plat->mode & SPI_TX_DUAL)
288 val |= SPI_PFL_MODE_MDWRSZ_MASK;
289 }
290
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100291 if (plat->mode & SPI_3WIRE)
292 val |= SPI_PFL_MODE_3WIRE_MASK;
Venkatesh Yadav Abbarapu91b9e372024-09-26 10:25:05 +0530293 writel(val, priv->regs + SPI_PFL_MODE_REG(plat->cs[0]));
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100294
295 /* transfer loop */
296 while (data_bytes > 0) {
William Zhang9f3f08c2023-08-11 19:03:19 -0700297 size_t curr_step = min(step_size, (size_t)data_bytes);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100298 int ret;
299
300 /* copy tx data */
301 if (tx) {
302 memcpy_toio(priv->regs + HSSPI_FIFO_BASE +
303 HSSPI_FIFO_OP_SIZE, tx, curr_step);
304 tx += curr_step;
305 }
306
307 /* set fifo operation */
Kursad Oney711c7302019-08-14 15:18:34 +0200308 writew(cpu_to_be16(opcode | (curr_step & HSSPI_FIFO_OP_BYTES_MASK)),
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100309 priv->regs + HSSPI_FIFO_OP_REG);
310
311 /* issue the transfer */
312 val = SPI_CMD_OP_START;
Venkatesh Yadav Abbarapu91b9e372024-09-26 10:25:05 +0530313 val |= (plat->cs[0] << SPI_CMD_PFL_SHIFT) &
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100314 SPI_CMD_PFL_MASK;
Venkatesh Yadav Abbarapu91b9e372024-09-26 10:25:05 +0530315 val |= (!plat->cs[0] << SPI_CMD_SLAVE_SHIFT) &
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100316 SPI_CMD_SLAVE_MASK;
Kursad Oney711c7302019-08-14 15:18:34 +0200317 writel(val, priv->regs + SPI_CMD_REG);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100318
319 /* wait for completion */
Kursad Oney711c7302019-08-14 15:18:34 +0200320 ret = wait_for_bit_32(priv->regs + SPI_STAT_REG,
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100321 SPI_STAT_SRCBUSY_MASK, false,
322 1000, false);
323 if (ret) {
324 printf("interrupt timeout\n");
325 return ret;
326 }
327
328 /* copy rx data */
329 if (rx) {
330 memcpy_fromio(rx, priv->regs + HSSPI_FIFO_BASE,
331 curr_step);
332 rx += curr_step;
333 }
334
335 data_bytes -= curr_step;
336 }
337
338 if (flags & SPI_XFER_END)
339 bcm63xx_hsspi_deactivate_cs(priv);
340
341 return 0;
342}
343
/*
 * Decide whether the current transfer segment can use the hardware
 * prepend mechanism, buffering half-duplex writes into prepend_buf.
 *
 * Prepend mode supports multiple half-duplex write segments followed by
 * one final full-duplex or read segment. Any segment that violates the
 * size/ordering limits demotes the whole transaction to dummy-cs mode.
 *
 * Return: HSSPI_XFER_MODE_PREPEND if prepend mode is still viable,
 *         HSSPI_XFER_MODE_DUMMYCS to fall back.
 */
static int bcm63xx_prepare_prepend_transfer(struct bcm63xx_hsspi_priv *priv,
					    unsigned int data_bytes, const void *dout, void *din,
					    unsigned long flags)
{
	/*
	 * only support multiple half duplex write transfer + optional
	 * full duplex read/write at the end.
	 */
	if (flags & SPI_XFER_BEGIN) {
		/* new transaction: clear previously buffered prepends */
		priv->prepend_cnt = 0;
	}

	if (din) {
		/* buffering reads not possible for prepend mode */
		if (!(flags & SPI_XFER_END)) {
			debug("unable to buffer reads\n");
			return HSSPI_XFER_MODE_DUMMYCS;
		}

		/* check rx size against the single-shot FIFO capacity */
		if (data_bytes > HSSPI_MAX_DATA_SIZE) {
			debug("max rx bytes exceeded\n");
			return HSSPI_XFER_MODE_DUMMYCS;
		}
	}

	if (dout) {
		/* check tx size */
		if (flags & SPI_XFER_END) {
			/* final segment: prepend + payload must fit the FIFO */
			if (priv->prepend_cnt + data_bytes > HSSPI_MAX_DATA_SIZE) {
				debug("max tx bytes exceeded\n");
				return HSSPI_XFER_MODE_DUMMYCS;
			}
		} else {
			/* intermediate write: limited to 15 prepend bytes */
			if (priv->prepend_cnt + data_bytes > HSSPI_MAX_PREPEND_SIZE) {
				debug("max prepend bytes exceeded\n");
				return HSSPI_XFER_MODE_DUMMYCS;
			}

			/*
			 * buffer transfer data in the prepend buf in case we have to fall
			 * back to dummy cs mode.
			 */
			memcpy(&priv->prepend_buf[priv->prepend_cnt], dout, data_bytes);
			priv->prepend_cnt += data_bytes;
		}
	}

	return HSSPI_XFER_MODE_PREPEND;
}
395
/*
 * Execute a buffered transaction using the controller's prepend feature:
 * previously buffered writes (prepend_buf) are sent ahead of the final
 * segment in a single hardware operation, so CS is handled entirely by
 * the hardware and no dummy-CS trick is needed.
 *
 * Everything happens when SPI_XFER_END arrives; earlier segments were
 * already buffered by bcm63xx_prepare_prepend_transfer().
 *
 * Return: 0 on success, negative error code on poll timeout.
 */
static int bcm63xx_hsspi_xfer_prepend(struct udevice *dev, unsigned int data_bytes,
				      const void *dout, void *din, unsigned long flags)
{
	struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
	struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
	uint16_t opcode = 0;
	uint32_t val, offset;
	int ret;

	if (flags & SPI_XFER_END) {
		offset = HSSPI_FIFO_BASE + HSSPI_FIFO_OP_SIZE;
		if (priv->prepend_cnt) {
			/* copy buffered prepend data into the FIFO first */
			memcpy_toio(priv->regs + offset,
				    priv->prepend_buf, priv->prepend_cnt);
		}

		if (dout && data_bytes) {
			/* copy tx data right after the prepend bytes */
			offset += priv->prepend_cnt;
			memcpy_toio(priv->regs + offset, dout, data_bytes);
		}

		bcm63xx_hsspi_activate_cs(priv, plat);
		if (dout && !din) {
			/* all half-duplex write. merge to single write */
			data_bytes += priv->prepend_cnt;
			opcode = HSSPI_FIFO_OP_CODE_W;
			priv->prepend_cnt = 0;
		} else if (!dout && din) {
			/* half-duplex read with prepend write */
			opcode = HSSPI_FIFO_OP_CODE_R;
		} else {
			/* full duplex read/write */
			opcode = HSSPI_FIFO_OP_READ_WRITE;
		}

		/* profile mode */
		val = SPI_PFL_MODE_FILL_MASK;
		if (plat->mode & SPI_3WIRE)
			val |= SPI_PFL_MODE_3WIRE_MASK;

		/* dual mode: data goes dual, prepend bytes stay single-bit */
		if ((opcode == HSSPI_FIFO_OP_CODE_R && (plat->mode & SPI_RX_DUAL)) ||
		    (opcode == HSSPI_FIFO_OP_CODE_W && (plat->mode & SPI_TX_DUAL))) {
			opcode |= HSSPI_FIFO_OP_MBIT_MASK;

			if (plat->mode & SPI_RX_DUAL) {
				val |= SPI_PFL_MODE_MDRDSZ_MASK;
				val |= priv->prepend_cnt << SPI_PFL_MODE_MDRDST_SHIFT;
			}
			if (plat->mode & SPI_TX_DUAL) {
				val |= SPI_PFL_MODE_MDWRSZ_MASK;
				val |= priv->prepend_cnt << SPI_PFL_MODE_MDWRST_SHIFT;
			}
		}
		/* prepend_cnt is 0 here for the merged write-only case */
		val |= (priv->prepend_cnt << SPI_PFL_MODE_PREPCNT_SHIFT);
		writel(val, priv->regs + SPI_PFL_MODE_REG(plat->cs[0]));

		/* set fifo operation (big-endian opcode + byte count) */
		val = opcode | (data_bytes & HSSPI_FIFO_OP_BYTES_MASK);
		writew(cpu_to_be16(val),
		       priv->regs + HSSPI_FIFO_OP_REG);

		/* issue the transfer on the slave's own profile and CS */
		val = SPI_CMD_OP_START;
		val |= (plat->cs[0] << SPI_CMD_PFL_SHIFT) &
		       SPI_CMD_PFL_MASK;
		val |= (plat->cs[0] << SPI_CMD_SLAVE_SHIFT) &
		       SPI_CMD_SLAVE_MASK;
		writel(val, priv->regs + SPI_CMD_REG);

		/* wait for completion (1 ms poll on the busy bit) */
		ret = wait_for_bit_32(priv->regs + SPI_STAT_REG,
				      SPI_STAT_SRCBUSY_MASK, false,
				      1000, false);
		if (ret) {
			bcm63xx_hsspi_deactivate_cs(priv);
			printf("spi polling timeout\n");
			return ret;
		}

		/* copy rx data */
		if (din)
			memcpy_fromio(din, priv->regs + HSSPI_FIFO_BASE,
				      data_bytes);
		bcm63xx_hsspi_deactivate_cs(priv);
	}

	return 0;
}
487
488static int bcm63xx_hsspi_xfer(struct udevice *dev, unsigned int bitlen,
489 const void *dout, void *din, unsigned long flags)
490{
491 struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
492 int ret;
493 u32 data_bytes = bitlen >> 3;
494
495 if (priv->xfer_mode == HSSPI_XFER_MODE_PREPEND) {
496 priv->xfer_mode =
497 bcm63xx_prepare_prepend_transfer(priv, data_bytes, dout, din, flags);
498 }
499
500 /* if not prependable, fall back to dummy cs mode with safe clock */
501 if (priv->xfer_mode == HSSPI_XFER_MODE_DUMMYCS) {
502 /* For pending prepend data from previous transfers, send it first */
503 if (priv->prepend_cnt) {
504 bcm63xx_hsspi_xfer_dummy_cs(dev, priv->prepend_cnt,
505 priv->prepend_buf, NULL,
506 (flags & ~SPI_XFER_END) | SPI_XFER_BEGIN);
507 priv->prepend_cnt = 0;
508 }
509 ret = bcm63xx_hsspi_xfer_dummy_cs(dev, data_bytes, dout, din, flags);
510 } else {
511 ret = bcm63xx_hsspi_xfer_prepend(dev, data_bytes, dout, din, flags);
512 }
513
514 if (flags & SPI_XFER_END)
515 priv->xfer_mode = HSSPI_XFER_MODE_PREPEND;
516
517 return ret;
518}
519
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100520static const struct dm_spi_ops bcm63xx_hsspi_ops = {
521 .cs_info = bcm63xx_hsspi_cs_info,
522 .set_mode = bcm63xx_hsspi_set_mode,
523 .set_speed = bcm63xx_hsspi_set_speed,
524 .xfer = bcm63xx_hsspi_xfer,
525};
526
/* Device tree compatible strings matched by this driver */
static const struct udevice_id bcm63xx_hsspi_ids[] = {
	{ .compatible = "brcm,bcm6328-hsspi", },
	{ .compatible = "brcm,bcmbca-hsspi-v1.0", },
	{ /* sentinel */ }
};
532
533static int bcm63xx_hsspi_child_pre_probe(struct udevice *dev)
534{
535 struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
Simon Glassb75b15b2020-12-03 16:55:23 -0700536 struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
William Zhang9a0ff5d2023-06-07 16:37:04 -0700537 struct spi_slave *slave = dev_get_parent_priv(dev);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100538
539 /* check cs */
Venkatesh Yadav Abbarapu91b9e372024-09-26 10:25:05 +0530540 if (plat->cs[0] >= priv->num_cs) {
541 printf("no cs %u\n", plat->cs[0]);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100542 return -ENODEV;
543 }
544
545 /* cs polarity */
546 if (plat->mode & SPI_CS_HIGH)
Venkatesh Yadav Abbarapu91b9e372024-09-26 10:25:05 +0530547 priv->cs_pols |= BIT(plat->cs[0]);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100548 else
Venkatesh Yadav Abbarapu91b9e372024-09-26 10:25:05 +0530549 priv->cs_pols &= ~BIT(plat->cs[0]);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100550
William Zhang9a0ff5d2023-06-07 16:37:04 -0700551 /*
552 * set the max read/write size to make sure each xfer are within the
553 * prepend limit
554 */
555 slave->max_read_size = HSSPI_MAX_DATA_SIZE;
556 slave->max_write_size = HSSPI_MAX_DATA_SIZE;
557
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100558 return 0;
559}
560
561static int bcm63xx_hsspi_probe(struct udevice *dev)
562{
563 struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev);
564 struct reset_ctl rst_ctl;
565 struct clk clk;
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100566 int ret;
567
Álvaro Fernández Rojas25fb8aa2018-03-22 19:39:37 +0100568 priv->regs = dev_remap_addr(dev);
569 if (!priv->regs)
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100570 return -EINVAL;
571
Álvaro Fernández Rojas25fb8aa2018-03-22 19:39:37 +0100572 priv->num_cs = dev_read_u32_default(dev, "num-cs", 8);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100573
574 /* enable clock */
575 ret = clk_get_by_name(dev, "hsspi", &clk);
576 if (ret < 0)
577 return ret;
578
579 ret = clk_enable(&clk);
Kursad Oneye31d6f12019-08-14 15:18:35 +0200580 if (ret < 0 && ret != -ENOSYS)
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100581 return ret;
582
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100583 /* get clock rate */
584 ret = clk_get_by_name(dev, "pll", &clk);
Kursad Oneye31d6f12019-08-14 15:18:35 +0200585 if (ret < 0 && ret != -ENOSYS)
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100586 return ret;
587
588 priv->clk_rate = clk_get_rate(&clk);
589
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100590 /* perform reset */
591 ret = reset_get_by_index(dev, 0, &rst_ctl);
Kursad Oneye31d6f12019-08-14 15:18:35 +0200592 if (ret >= 0) {
593 ret = reset_deassert(&rst_ctl);
594 if (ret < 0)
595 return ret;
596 }
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100597
598 ret = reset_free(&rst_ctl);
599 if (ret < 0)
600 return ret;
601
602 /* initialize hardware */
Kursad Oney711c7302019-08-14 15:18:34 +0200603 writel(0, priv->regs + SPI_IR_MASK_REG);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100604
605 /* clear pending interrupts */
Kursad Oney711c7302019-08-14 15:18:34 +0200606 writel(SPI_IR_CLEAR_ALL, priv->regs + SPI_IR_STAT_REG);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100607
608 /* enable clk gate */
Kursad Oney711c7302019-08-14 15:18:34 +0200609 setbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CLK_GATE_MASK);
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100610
611 /* read default cs polarities */
Kursad Oney711c7302019-08-14 15:18:34 +0200612 priv->cs_pols = readl(priv->regs + SPI_CTL_REG) &
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100613 SPI_CTL_CS_POL_MASK;
614
William Zhang9a0ff5d2023-06-07 16:37:04 -0700615 /* default in prepend mode */
616 priv->xfer_mode = HSSPI_XFER_MODE_PREPEND;
617
Álvaro Fernández Rojas55d96ec2018-01-20 02:13:38 +0100618 return 0;
619}
620
/* U-Boot driver-model registration for the BCM63xx HSSPI controller */
U_BOOT_DRIVER(bcm63xx_hsspi) = {
	.name = "bcm63xx_hsspi",
	.id = UCLASS_SPI,
	.of_match = bcm63xx_hsspi_ids,
	.ops = &bcm63xx_hsspi_ops,
	.priv_auto = sizeof(struct bcm63xx_hsspi_priv),
	.child_pre_probe = bcm63xx_hsspi_child_pre_probe,
	.probe = bcm63xx_hsspi_probe,
};