blob: 2b2c31b4b3fb5caa1381327e3bd07fafec288ced [file] [log] [blame]
developer24202202022-09-09 19:59:45 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2022 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: SkyLake.Huang <skylake.huang@mediatek.com>
6 */
7
8#include <clk.h>
9#include <cpu_func.h>
10#include <div64.h>
11#include <dm.h>
12#include <spi.h>
13#include <spi-mem.h>
14#include <stdbool.h>
15#include <watchdog.h>
16#include <dm/device.h>
17#include <dm/device_compat.h>
18#include <dm/devres.h>
19#include <dm/pinctrl.h>
20#include <linux/bitops.h>
developer24202202022-09-09 19:59:45 +080021#include <linux/dma-mapping.h>
22#include <linux/io.h>
23#include <linux/iopoll.h>
Tom Rini5ba346a2022-10-28 20:27:08 -040024#include <linux/sizes.h>
developer24202202022-09-09 19:59:45 +080025
/* Controller register map */
#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_IRQ_REG			0x001c
#define SPI_STATUS_REG			0x0020
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

/* SPI_CFG0_REG: SCK and CS timing fields (legacy 8-bit layout) */
#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
/* SPI_CFG0_REG layout used when enhance_timing is set (16-bit fields) */
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

/* SPI_CFG1_REG: CS idle time, packet length/loop, tick delay */
#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICKDLY_OFFSET	29

#define SPI_CFG1_GET_TICKDLY_MASK	GENMASK(31, 29)
#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
/* IPM designs widen the packet length field to 16 bits */
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)

/* SPI_CFG2_REG: SCK timing when enhance_timing is set (16-bit fields) */
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16
#define SPI_CFG2_SCK_HIGH_MASK		GENMASK(15, 0)
#define SPI_CFG2_SCK_LOW_MASK		GENMASK(31, 16)

/* SPI_CMD_REG bits */
#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

/* Map a bus width (1/2/4) to the CFG3 pin-mode field encoding (0/1/2) */
#define PIN_MODE_CFG(x)			((x) / 2)

/* SPI_CFG3_IPM_REG: spi-mem op description (IPM designs only) */
#define SPI_CFG3_IPM_PIN_MODE_OFFSET	0
#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
#define SPI_CFG3_IPM_DUMMY_BYTELEN_OFFSET 16

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)
#define SPI_CFG3_IPM_DUMMY_BYTELEN_MASK	GENMASK(19, 16)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

/* Controller states tracked in priv->state */
#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

#define MTK_SPI_32BITS_MASK		0xffffffff

#define DMA_ADDR_EXT_BITS		36
#define DMA_ADDR_DEF_BITS		32

/* Convert a clock-cycle count at @freq (Hz) into microseconds, rounded up */
#define CLK_TO_US(freq, clkcnt)		DIV_ROUND_UP((clkcnt), (freq) / 1000000)
120
/* struct mtk_spim_capability - hardware feature flags for one IP revision
 * @enhance_timing: Some IC designs adjust cfg registers to enhance time accuracy
 * @dma_ext: Some ICs support DMA address extension (> 32-bit addresses)
 * @ipm_design: The IPM IP design improves some features, and supports dual/quad mode
 * @support_quad: Whether quad mode is supported
 */
struct mtk_spim_capability {
	bool enhance_timing;
	bool dma_ext;
	bool ipm_design;
	bool support_quad;
};
133
/* struct mtk_spim_priv - per-device driver state
 * @base: Base address of the spi controller registers
 * @state: Controller state (MTK_SPI_IDLE or MTK_SPI_PAUSED)
 * @sel_clk: Pad clock
 * @spi_clk: Core clock
 * @parent_clk: Parent clock (needed for mediatek,spi-ipm, upstream DTSI)
 * @hclk: HCLK clock (needed for mediatek,spi-ipm, upstream DTSI)
 * @pll_clk_rate: Controller's PLL source clock rate, which is different
 *		  from SPI bus clock rate
 * @xfer_len: Current length of data for transfer
 * @hw_cap: Controller capabilities
 * @tick_dly: Used to postpone SPI sampling time
 * @sample_sel: Sample edge of MISO
 * @dev: udevice of this spi controller
 * @tx_dma: Tx DMA address
 * @rx_dma: Rx DMA address
 */
struct mtk_spim_priv {
	void __iomem *base;
	u32 state;
	struct clk sel_clk, spi_clk;
	struct clk parent_clk, hclk;
	u32 pll_clk_rate;
	u32 xfer_len;
	struct mtk_spim_capability hw_cap;
	u32 tick_dly;
	u32 sample_sel;

	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};
166
/* Pulse the software reset bit to bring the controller to a known state. */
static void mtk_spim_reset(struct mtk_spim_priv *priv)
{
	/* set the software reset bit in SPI_CMD_REG. */
	setbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
	clrbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
}
173
174static int mtk_spim_hw_init(struct spi_slave *slave)
175{
176 struct udevice *bus = dev_get_parent(slave->dev);
177 struct mtk_spim_priv *priv = dev_get_priv(bus);
178 u16 cpha, cpol;
179 u32 reg_val;
180
181 cpha = slave->mode & SPI_CPHA ? 1 : 0;
182 cpol = slave->mode & SPI_CPOL ? 1 : 0;
183
184 if (priv->hw_cap.enhance_timing) {
185 if (priv->hw_cap.ipm_design) {
186 /* CFG3 reg only used for spi-mem,
187 * here write to default value
188 */
189 writel(0x0, priv->base + SPI_CFG3_IPM_REG);
190 clrsetbits_le32(priv->base + SPI_CMD_REG,
191 SPI_CMD_IPM_GET_TICKDLY_MASK,
192 priv->tick_dly <<
193 SPI_CMD_IPM_GET_TICKDLY_OFFSET);
194 } else {
195 clrsetbits_le32(priv->base + SPI_CFG1_REG,
196 SPI_CFG1_GET_TICKDLY_MASK,
197 priv->tick_dly <<
198 SPI_CFG1_GET_TICKDLY_OFFSET);
199 }
200 }
201
202 reg_val = readl(priv->base + SPI_CMD_REG);
203 if (priv->hw_cap.ipm_design) {
204 /* SPI transfer without idle time until packet length done */
205 reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
206 if (slave->mode & SPI_LOOP)
207 reg_val |= SPI_CMD_IPM_SPIM_LOOP;
208 else
209 reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
210 }
211
212 if (cpha)
213 reg_val |= SPI_CMD_CPHA;
214 else
215 reg_val &= ~SPI_CMD_CPHA;
216 if (cpol)
217 reg_val |= SPI_CMD_CPOL;
218 else
219 reg_val &= ~SPI_CMD_CPOL;
220
221 /* set the mlsbx and mlsbtx */
222 if (slave->mode & SPI_LSB_FIRST) {
223 reg_val &= ~SPI_CMD_TXMSBF;
224 reg_val &= ~SPI_CMD_RXMSBF;
225 } else {
226 reg_val |= SPI_CMD_TXMSBF;
227 reg_val |= SPI_CMD_RXMSBF;
228 }
229
230 /* do not reverse tx/rx endian */
231 reg_val &= ~SPI_CMD_TX_ENDIAN;
232 reg_val &= ~SPI_CMD_RX_ENDIAN;
233
234 if (priv->hw_cap.enhance_timing) {
235 /* set CS polarity */
236 if (slave->mode & SPI_CS_HIGH)
237 reg_val |= SPI_CMD_CS_POL;
238 else
239 reg_val &= ~SPI_CMD_CS_POL;
240
241 if (priv->sample_sel)
242 reg_val |= SPI_CMD_SAMPLE_SEL;
243 else
244 reg_val &= ~SPI_CMD_SAMPLE_SEL;
245 }
246
developer92aef702023-07-19 17:16:02 +0800247 /* Disable interrupt enable for pause mode & normal mode */
248 reg_val &= ~(SPI_CMD_PAUSE_IE | SPI_CMD_FINISH_IE);
249
developer24202202022-09-09 19:59:45 +0800250 /* disable dma mode */
251 reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
252
253 /* disable deassert mode */
254 reg_val &= ~SPI_CMD_DEASSERT;
255
256 writel(reg_val, priv->base + SPI_CMD_REG);
257
258 return 0;
259}
260
261static void mtk_spim_prepare_transfer(struct mtk_spim_priv *priv,
262 u32 speed_hz)
263{
developer9f5fbaf2023-07-19 17:15:54 +0800264 u32 div, sck_time, cs_time, reg_val;
developer24202202022-09-09 19:59:45 +0800265
developer9f5fbaf2023-07-19 17:15:54 +0800266 if (speed_hz <= priv->pll_clk_rate / 4)
267 div = DIV_ROUND_UP(priv->pll_clk_rate, speed_hz);
developer24202202022-09-09 19:59:45 +0800268 else
269 div = 4;
270
271 sck_time = (div + 1) / 2;
272 cs_time = sck_time * 2;
273
274 if (priv->hw_cap.enhance_timing) {
275 reg_val = ((sck_time - 1) & 0xffff)
276 << SPI_CFG2_SCK_HIGH_OFFSET;
277 reg_val |= ((sck_time - 1) & 0xffff)
278 << SPI_CFG2_SCK_LOW_OFFSET;
279 writel(reg_val, priv->base + SPI_CFG2_REG);
280
281 reg_val = ((cs_time - 1) & 0xffff)
282 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET;
283 reg_val |= ((cs_time - 1) & 0xffff)
284 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET;
285 writel(reg_val, priv->base + SPI_CFG0_REG);
286 } else {
287 reg_val = ((sck_time - 1) & 0xff)
288 << SPI_CFG0_SCK_HIGH_OFFSET;
289 reg_val |= ((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET;
290 reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET;
291 reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET;
292 writel(reg_val, priv->base + SPI_CFG0_REG);
293 }
294
295 reg_val = readl(priv->base + SPI_CFG1_REG);
296 reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
297 reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET;
298 writel(reg_val, priv->base + SPI_CFG1_REG);
299}
300
301/**
302 * mtk_spim_setup_packet() - setup packet format.
303 * @priv: controller priv
304 *
305 * This controller sents/receives data in packets. The packet size is
306 * configurable.
307 *
308 * This function calculates the maximum packet size available for current
309 * data, and calculates the number of packets required to sent/receive data
310 * as much as possible.
311 */
312static void mtk_spim_setup_packet(struct mtk_spim_priv *priv)
313{
314 u32 packet_size, packet_loop, reg_val;
315
316 /* Calculate maximum packet size */
317 if (priv->hw_cap.ipm_design)
318 packet_size = min_t(u32,
319 priv->xfer_len,
320 MTK_SPI_IPM_PACKET_SIZE);
321 else
322 packet_size = min_t(u32,
323 priv->xfer_len,
324 MTK_SPI_PACKET_SIZE);
325
326 /* Calculates number of packets to sent/receive */
327 packet_loop = priv->xfer_len / packet_size;
328
329 reg_val = readl(priv->base + SPI_CFG1_REG);
330 if (priv->hw_cap.ipm_design)
331 reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
332 else
333 reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
334
335 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
336
337 reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
338
339 reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
340
341 writel(reg_val, priv->base + SPI_CFG1_REG);
342}
343
344static void mtk_spim_enable_transfer(struct mtk_spim_priv *priv)
345{
346 u32 cmd;
347
348 cmd = readl(priv->base + SPI_CMD_REG);
349 if (priv->state == MTK_SPI_IDLE)
350 cmd |= SPI_CMD_ACT;
351 else
352 cmd |= SPI_CMD_RESUME;
353 writel(cmd, priv->base + SPI_CMD_REG);
354}
355
356static bool mtk_spim_supports_op(struct spi_slave *slave,
357 const struct spi_mem_op *op)
358{
359 struct udevice *bus = dev_get_parent(slave->dev);
360 struct mtk_spim_priv *priv = dev_get_priv(bus);
361
developer088a5f92025-01-17 17:17:55 +0800362 if (!spi_mem_default_supports_op(slave, op))
363 return false;
364
developer24202202022-09-09 19:59:45 +0800365 if (op->cmd.buswidth == 0 || op->cmd.buswidth > 4 ||
366 op->addr.buswidth > 4 || op->dummy.buswidth > 4 ||
367 op->data.buswidth > 4)
368 return false;
369
370 if (!priv->hw_cap.support_quad && (op->cmd.buswidth > 2 ||
371 op->addr.buswidth > 2 || op->dummy.buswidth > 2 ||
372 op->data.buswidth > 2))
373 return false;
374
375 if (op->addr.nbytes && op->dummy.nbytes &&
376 op->addr.buswidth != op->dummy.buswidth)
377 return false;
378
379 if (op->addr.nbytes + op->dummy.nbytes > 16)
380 return false;
381
382 if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
383 if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
384 MTK_SPI_IPM_PACKET_LOOP ||
385 op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
386 return false;
387 }
388
389 return true;
390}
391
392static void mtk_spim_setup_dma_xfer(struct mtk_spim_priv *priv,
393 const struct spi_mem_op *op)
394{
395 writel((u32)(priv->tx_dma & MTK_SPI_32BITS_MASK),
396 priv->base + SPI_TX_SRC_REG);
397
398 if (priv->hw_cap.dma_ext)
399 writel((u32)(priv->tx_dma >> 32),
400 priv->base + SPI_TX_SRC_REG_64);
401
402 if (op->data.dir == SPI_MEM_DATA_IN) {
403 writel((u32)(priv->rx_dma & MTK_SPI_32BITS_MASK),
404 priv->base + SPI_RX_DST_REG);
405
406 if (priv->hw_cap.dma_ext)
407 writel((u32)(priv->rx_dma >> 32),
408 priv->base + SPI_RX_DST_REG_64);
409 }
410}
411
/*
 * mtk_spim_transfer_wait() - poll until the current transfer completes.
 *
 * Derives a timeout from the programmed SCK high/low counts and the
 * transfer length, then polls SPI_STATUS_REG until the finish bit sets.
 *
 * Return: 0 on completion, -ETIMEDOUT if the controller never finished.
 */
static int mtk_spim_transfer_wait(struct spi_slave *slave,
				  const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u32 pll_clk, sck_l, sck_h, clk_count, reg;
	ulong us = 1;
	int ret = 0;

	/* Ops with no data phase still clock out roughly 32 bits */
	if (op->data.dir == SPI_MEM_NO_DATA)
		clk_count = 32;
	else
		clk_count = op->data.nbytes;

	/* Effective SCK rate = PLL rate / (sck_low + sck_high + 2) */
	pll_clk = priv->pll_clk_rate;
	sck_l = readl(priv->base + SPI_CFG2_REG) >> SPI_CFG2_SCK_LOW_OFFSET;
	sck_h = readl(priv->base + SPI_CFG2_REG) & SPI_CFG2_SCK_HIGH_MASK;
	/*
	 * NOTE(review): do_div() conventionally requires a u64 dividend,
	 * but pll_clk is u32 here — confirm against this tree's div64
	 * helpers.
	 */
	do_div(pll_clk, sck_l + sck_h + 2);

	/* 8 clocks per byte, plus a generous fixed tolerance */
	us = CLK_TO_US(pll_clk, clk_count * 8);
	us += 1000 * 1000; /* 1s tolerance */

	if (us > UINT_MAX)
		us = UINT_MAX;

	/* Bit 0 of SPI_STATUS_REG flags transfer completion */
	ret = readl_poll_timeout(priv->base + SPI_STATUS_REG, reg,
				 reg & 0x1, us);
	if (ret < 0) {
		dev_err(priv->dev, "transfer timeout, val: 0x%lx\n", us);
		return -ETIMEDOUT;
	}

	return 0;
}
446
/*
 * mtk_spim_exec_op() - execute one spi-mem operation via DMA.
 *
 * Builds a bounce buffer holding opcode + address + dummy (+ TX data),
 * maps it for DMA, optionally maps (or bounces) the RX buffer, then
 * starts the transfer and waits for completion. All resources are
 * released through the goto cleanup ladder at the end.
 *
 * Return: 0 on success, -ENOMEM on allocation/mapping failure,
 *	   -ETIMEDOUT if the transfer never completed.
 */
static int mtk_spim_exec_op(struct spi_slave *slave,
			    const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u32 reg_val, nio = 1, tx_size;
	char *tx_tmp_buf;
	char *rx_tmp_buf;
	int i, ret = 0;

	/* Reset and reprogram the controller for this slave */
	mtk_spim_reset(priv);
	mtk_spim_hw_init(slave);
	mtk_spim_prepare_transfer(priv, slave->max_hz);

	reg_val = readl(priv->base + SPI_CFG3_IPM_REG);
	/* opcode byte len: always a single command byte */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len share one field */
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len; the no-data flag skips the data phase entirely */
	if (!op->data.nbytes) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, priv->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		priv->xfer_len = op->data.nbytes;
		mtk_spim_setup_packet(priv);
	}

	/* Single-line addr/dummy phases need the extended (x) mode */
	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	/* Pick the widest IO mode any phase asks for (1/2/4 lines) */
	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;

	/* Half-duplex: direction bit set for reads, clear for writes */
	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, priv->base + SPI_CFG3_IPM_REG);

	/* TX bounce buffer holds opcode + addr + dummy (+ data on writes) */
	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	/* Controller needs at least a FIFO-sized (32 byte) buffer */
	tx_size = max(tx_size, (u32)32);

	/* Fill up tx data */
	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL);
	if (!tx_tmp_buf) {
		ret = -ENOMEM;
		goto exit;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	/* Address is sent MSB first */
	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	/* Dummy cycles are driven as 0xff filler bytes */
	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1, 0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out, op->data.nbytes);
	/* Finish filling up tx data */

	priv->tx_dma = dma_map_single(tx_tmp_buf, tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, priv->tx_dma)) {
		ret = -ENOMEM;
		goto tx_free;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		/* Unaligned RX buffers get a 4-byte-aligned bounce buffer */
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto tx_unmap;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		priv->rx_dma = dma_map_single(rx_tmp_buf, op->data.nbytes,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, priv->rx_dma)) {
			ret = -ENOMEM;
			goto rx_free;
		}
	}

	/* Enable DMA engines for this transfer only */
	reg_val = readl(priv->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;

	writel(reg_val, priv->base + SPI_CMD_REG);

	mtk_spim_setup_dma_xfer(priv, op);

	mtk_spim_enable_transfer(priv);

	/* Wait for the transfer to finish (polling, no interrupt). */
	ret = mtk_spim_transfer_wait(slave, op);
	if (ret)
		goto rx_unmap;

	/* Copy bounced RX data back to the caller's unaligned buffer */
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);

rx_unmap:
	/* spi disable dma */
	reg_val = readl(priv->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, priv->base + SPI_CMD_REG);

	writel(0, priv->base + SPI_TX_SRC_REG);
	writel(0, priv->base + SPI_RX_DST_REG);

	if (op->data.dir == SPI_MEM_DATA_IN)
		dma_unmap_single(priv->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
rx_free:
	/* Only the bounce buffer is ours to free, never the caller's */
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
tx_unmap:
	dma_unmap_single(priv->tx_dma,
			 tx_size, DMA_TO_DEVICE);
tx_free:
	kfree(tx_tmp_buf);
exit:
	return ret;
}
608
609static int mtk_spim_adjust_op_size(struct spi_slave *slave,
610 struct spi_mem_op *op)
611{
612 int opcode_len;
613
614 if (!op->data.nbytes)
615 return 0;
616
617 if (op->data.dir != SPI_MEM_NO_DATA) {
618 opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
619 if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
620 op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
621 /* force data buffer dma-aligned. */
622 op->data.nbytes -= op->data.nbytes % 4;
623 }
624 }
625
626 return 0;
627}
628
/*
 * mtk_spim_get_attr() - read capability flags and tuning values from DT.
 *
 * Boolean capability properties default to false when absent. Missing
 * tick_dly / sample_sel values are reported but not fatal.
 *
 * Return: status of the last dev_read_u32() call; note that earlier
 *	   failures are not propagated, and the caller currently ignores
 *	   the return value anyway.
 */
static int mtk_spim_get_attr(struct mtk_spim_priv *priv, struct udevice *dev)
{
	int ret;

	priv->hw_cap.enhance_timing = dev_read_bool(dev, "enhance_timing");
	priv->hw_cap.dma_ext = dev_read_bool(dev, "dma_ext");
	priv->hw_cap.ipm_design = dev_read_bool(dev, "ipm_design");
	priv->hw_cap.support_quad = dev_read_bool(dev, "support_quad");

	ret = dev_read_u32(dev, "tick_dly", &priv->tick_dly);
	if (ret < 0)
		dev_err(priv->dev, "tick dly not set.\n");

	ret = dev_read_u32(dev, "sample_sel", &priv->sample_sel);
	if (ret < 0)
		dev_err(priv->dev, "sample sel not set.\n");

	return ret;
}
648
/*
 * mtk_spim_probe() - map registers, resolve capabilities and enable clocks.
 *
 * Capabilities come either from fixed defaults (upstream
 * "mediatek,spi-ipm" compatible) or from custom DT properties
 * (mtk_spim_get_attr). The PLL source clock rate is cached for later
 * timing calculations.
 *
 * Return: 0 on success, -EINVAL on bad regs/clock rate, or a clk API error.
 */
static int mtk_spim_probe(struct udevice *dev)
{
	struct mtk_spim_priv *priv = dev_get_priv(dev);
	int ret;

	priv->base = dev_read_addr_ptr(dev);
	if (!priv->base)
		return -EINVAL;

	/*
	 * The upstream linux driver for the ipm design enables all the
	 * modes and sets up the calibration values directly in the driver
	 * with standard values.
	 */
	if (device_is_compatible(dev, "mediatek,spi-ipm")) {
		priv->hw_cap.enhance_timing = true;
		priv->hw_cap.dma_ext = true;
		priv->hw_cap.ipm_design = true;
		priv->hw_cap.support_quad = true;
		priv->sample_sel = 0;
		priv->tick_dly = 2;
	} else {
		/* Best-effort: missing optional properties are tolerated */
		mtk_spim_get_attr(priv, dev);
	}

	ret = clk_get_by_name(dev, "sel-clk", &priv->sel_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get sel-clk\n");
		return ret;
	}

	ret = clk_get_by_name(dev, "spi-clk", &priv->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get spi-clk\n");
		return ret;
	}

	/*
	 * Upstream DTSI uses a different compatible that provides
	 * additional clocks instead of the assigned-clock implementation.
	 */
	if (device_is_compatible(dev, "mediatek,spi-ipm")) {
		ret = clk_get_by_name(dev, "parent-clk", &priv->parent_clk);
		if (ret < 0) {
			dev_err(dev, "failed to get parent-clk\n");
			return ret;
		}

		ret = clk_get_by_name(dev, "hclk", &priv->hclk);
		if (ret < 0) {
			dev_err(dev, "failed to get hclk\n");
			return ret;
		}

		/* Reparent the pad clock before enabling it below */
		clk_enable(&priv->parent_clk);
		clk_set_parent(&priv->sel_clk, &priv->parent_clk);

		clk_enable(&priv->hclk);
	}

	clk_enable(&priv->spi_clk);
	clk_enable(&priv->sel_clk);

	/* Cache the PLL source rate used for all timing calculations */
	priv->pll_clk_rate = clk_get_rate(&priv->spi_clk);
	if (priv->pll_clk_rate == 0)
		return -EINVAL;

	return 0;
}
718
/*
 * No-op: the actual bus speed is programmed per-op in
 * mtk_spim_prepare_transfer() from slave->max_hz.
 */
static int mtk_spim_set_speed(struct udevice *dev, uint speed)
{
	return 0;
}
723
/*
 * No-op: the mode bits are applied per-op in mtk_spim_hw_init() from
 * slave->mode.
 */
static int mtk_spim_set_mode(struct udevice *dev, uint mode)
{
	return 0;
}
728
/* spi-mem callbacks: this driver only supports spi-mem style transfers */
static const struct spi_controller_mem_ops mtk_spim_mem_ops = {
	.adjust_op_size = mtk_spim_adjust_op_size,
	.supports_op = mtk_spim_supports_op,
	.exec_op = mtk_spim_exec_op
};
734
/* DM SPI uclass ops; set_speed/set_mode are no-ops (handled per-op) */
static const struct dm_spi_ops mtk_spim_ops = {
	.mem_ops = &mtk_spim_mem_ops,
	.set_speed = mtk_spim_set_speed,
	.set_mode = mtk_spim_set_mode,
};
740
/* "mediatek,ipm-spi" is the legacy binding; "mediatek,spi-ipm" is upstream */
static const struct udevice_id mtk_spim_ids[] = {
	{ .compatible = "mediatek,ipm-spi" },
	{ .compatible = "mediatek,spi-ipm", },
	{}
};
746
U_BOOT_DRIVER(mtk_spim) = {
	.name = "mtk_spim",
	.id = UCLASS_SPI,
	.of_match = mtk_spim_ids,
	.ops = &mtk_spim_ops,
	.priv_auto = sizeof(struct mtk_spim_priv),
	.probe = mtk_spim_probe,
};