From f8707b14c517f9402456c3c1e98f64d0eb8a4af5 Mon Sep 17 00:00:00 2001
From: Sam Shih <sam.shih@mediatek.com>
Date: Fri, 2 Jun 2023 13:06:21 +0800
Subject: [PATCH] [spi-and-storage][999-2375-spi-update-driver.patch]

Update the MediaTek spi-mt65xx controller driver:

- merge ipm_compat_single/ipm_compat_quad into a single mtk_ipm_compat
  and add the "mediatek,spi-ipm" compatible
- add mt8192 and mt6893 compatibles; mt6893 keeps its clocks prepared
  across runtime suspend (no_need_unprepare)
- also program get_tick_dly on parts without enhance_timing, via the
  2-bit field at bit 30 of SPI_CFG1
- cache the spi-clk rate at probe time instead of calling
  clk_get_rate() on every transfer
- switch chip-select handling to GPIO descriptors (cs_gpiod)
- make spi-hclk optional (devm_clk_get_optional) and drop need_ahb_clk
- set the DMA max segment size and fix the spi-mem exec_op error paths
  (use_spimem bookkeeping, rx buffer cleanup)
- probe() cleanups: devm_spi_alloc_master(), device_get_match_data(),
  register the controller only after clock setup; remove() now resumes
  the device before resetting it
---
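Note (below the "---", so git am ignores it): the timeout arithmetic
introduced in mtk_spi_transfer_wait() is easy to misread, so here is a
standalone sketch of the same calculation in plain userspace C. Only the
constants mirror the hunk; the helper name and the example numbers are
ours, not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Each byte needs 8 SPI clock cycles; speed is in Hz and the result
     * is wanted in milliseconds, hence the 8 * 1000 scale factor. As in
     * the driver, the result is doubled and padded with a 1s tolerance.
     */
    static uint64_t xfer_timeout_ms(uint32_t nbytes, uint32_t speed_hz)
    {
            uint64_t ms = 8000ULL;          /* 8 cycles/byte * 1000 ms/s */

            if (nbytes == 0)                /* the SPI_MEM_NO_DATA case */
                    ms *= 32;               /* avoid a zero timeout */
            else
                    ms *= nbytes;
            ms /= speed_hz;                 /* div_u64() in the kernel */

            return ms + ms + 1000;          /* 1s tolerance */
    }

    int main(void)
    {
            /* e.g. a 4 KiB read at 26 MHz -> 1002 ms */
            printf("%llu ms\n",
                   (unsigned long long)xfer_timeout_ms(4096, 26000000));
            return 0;
    }

Also note the two tick-delay fields in SPI_CFG1: parts with
enhance_timing use a 3-bit field at bit 29 (mask 0xe0000000, value
clamped with & 0x7), while V1 parts use a 2-bit field at bit 30 (mask
0xc0000000, clamped with & 0x3); mtk_spi_hw_init() below follows that
layout.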
 drivers/spi/spi-mt65xx.c | 414 ++++++++++++++++++++++-----------------
 1 file changed, 231 insertions(+), 183 deletions(-)

diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 2034d1979..b80f8dcd9 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -12,7 +12,7 @@
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
@@ -43,9 +43,11 @@
 #define SPI_CFG1_CS_IDLE_OFFSET 0
 #define SPI_CFG1_PACKET_LOOP_OFFSET 8
 #define SPI_CFG1_PACKET_LENGTH_OFFSET 16
-#define SPI_CFG1_GET_TICKDLY_OFFSET 29
+#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
+#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30

-#define SPI_CFG1_GET_TICKDLY_MASK GENMASK(31, 29)
+#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
+#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000
 #define SPI_CFG1_CS_IDLE_MASK 0xff
 #define SPI_CFG1_PACKET_LOOP_MASK 0xff00
 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
@@ -78,7 +80,6 @@

 #define PIN_MODE_CFG(x) ((x) / 2)

-#define SPI_CFG3_IPM_PIN_MODE_OFFSET 0
 #define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
 #define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
 #define SPI_CFG3_IPM_XMODE_EN BIT(4)
@@ -94,14 +95,14 @@

 #define MTK_SPI_PAUSE_INT_STATUS 0x2

-#define MTK_SPI_IDLE 0
-#define MTK_SPI_PAUSED 1
-
 #define MTK_SPI_MAX_FIFO_SIZE 32U
 #define MTK_SPI_PACKET_SIZE 1024
 #define MTK_SPI_IPM_PACKET_SIZE SZ_64K
 #define MTK_SPI_IPM_PACKET_LOOP SZ_256

+#define MTK_SPI_IDLE 0
+#define MTK_SPI_PAUSED 1
+
 #define MTK_SPI_32BITS_MASK (0xffffffff)

 #define DMA_ADDR_EXT_BITS (36)
@@ -115,11 +116,8 @@ struct mtk_spi_compatible {
 bool enhance_timing;
 /* some IC support DMA addr extension */
 bool dma_ext;
- /* the IPM IP design improve some feature, and support dual/quad mode */
+ bool no_need_unprepare;
 bool ipm_design;
- bool support_quad;
- /* some IC ahb & apb clk is different and also need to be enabled */
- bool need_ahb_clk;
 };

 struct mtk_spi_config {
@@ -140,7 +138,7 @@ struct mtk_spi {
 u32 tx_sgl_len, rx_sgl_len;
 const struct mtk_spi_compatible *dev_comp;
 struct mtk_spi_config dev_config;
-
+ u32 spi_clk_hz;
 struct completion spimem_done;
 bool use_spimem;
 struct device *dev;
@@ -154,21 +152,10 @@ static const struct mtk_spi_compatible mt2712_compat = {
 .must_tx = true,
 };

-static const struct mtk_spi_compatible ipm_compat_single = {
- .must_tx = true,
+static const struct mtk_spi_compatible mtk_ipm_compat = {
 .enhance_timing = true,
 .dma_ext = true,
 .ipm_design = true,
- .need_ahb_clk = true,
-};
-
-static const struct mtk_spi_compatible ipm_compat_quad = {
- .must_tx = true,
- .enhance_timing = true,
- .dma_ext = true,
- .ipm_design = true,
- .support_quad = true,
- .need_ahb_clk = true,
 };

 static const struct mtk_spi_compatible mt6765_compat = {
@@ -194,13 +181,25 @@ static const struct mtk_spi_compatible mt8183_compat = {
 .enhance_timing = true,
 };

+static const struct mtk_spi_compatible mt6893_compat = {
+ .need_pad_sel = true,
+ .must_tx = true,
+ .enhance_timing = true,
+ .dma_ext = true,
+ .no_need_unprepare = true,
+};
+
 static const struct of_device_id mtk_spi_of_match[] = {
+ { .compatible = "mediatek,spi-ipm",
+ .data = (void *)&mtk_ipm_compat,
+ },
 { .compatible = "mediatek,ipm-spi-single",
- .data = (void *)&ipm_compat_single,
+ .data = (void *)&mtk_ipm_compat,
 },
 { .compatible = "mediatek,ipm-spi-quad",
- .data = (void *)&ipm_compat_quad,
+ .data = (void *)&mtk_ipm_compat,
 },
+
 { .compatible = "mediatek,mt2701-spi",
 .data = (void *)&mtk_common_compat,
 },
@@ -228,6 +227,12 @@ static const struct of_device_id mtk_spi_of_match[] = {
 { .compatible = "mediatek,mt8183-spi",
 .data = (void *)&mt8183_compat,
 },
+ { .compatible = "mediatek,mt8192-spi",
+ .data = (void *)&mt6765_compat,
+ },
+ { .compatible = "mediatek,mt6893-spi",
+ .data = (void *)&mt6893_compat,
+ },
 {}
 };
 MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
@@ -256,27 +261,30 @@ static int mtk_spi_hw_init(struct spi_master *master,
 cpha = spi->mode & SPI_CPHA ? 1 : 0;
 cpol = spi->mode & SPI_CPOL ? 1 : 0;

+ /* tick delay */
 if (mdata->dev_comp->enhance_timing) {
 if (mdata->dev_comp->ipm_design) {
- /* CFG3 reg only used for spi-mem,
- * here write to default value
- */
- writel(0x0, mdata->base + SPI_CFG3_IPM_REG);
-
 reg_val = readl(mdata->base + SPI_CMD_REG);
 reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
- reg_val |= mdata->dev_config.get_tick_dly
- << SPI_CMD_IPM_GET_TICKDLY_OFFSET;
+ reg_val |= ((mdata->dev_config.get_tick_dly & 0x7)
+ << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
 writel(reg_val, mdata->base + SPI_CMD_REG);
 } else {
 reg_val = readl(mdata->base + SPI_CFG1_REG);
- reg_val &= ~SPI_CFG1_GET_TICKDLY_MASK;
- reg_val |= mdata->dev_config.get_tick_dly
- << SPI_CFG1_GET_TICKDLY_OFFSET;
+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
+ reg_val |= ((mdata->dev_config.get_tick_dly & 0x7)
+ << SPI_CFG1_GET_TICK_DLY_OFFSET);
 writel(reg_val, mdata->base + SPI_CFG1_REG);
 }
+ } else {
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
+ reg_val |= ((mdata->dev_config.get_tick_dly & 0x3)
+ << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
 }

+
 reg_val = readl(mdata->base + SPI_CMD_REG);
 if (mdata->dev_comp->ipm_design) {
 /* SPI transfer without idle time until packet length done */
@@ -375,12 +383,11 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
 static void mtk_spi_prepare_transfer(struct spi_master *master,
 u32 speed_hz)
 {
- u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
+ u32 div, sck_time, cs_time, reg_val;
 struct mtk_spi *mdata = spi_master_get_devdata(master);

- spi_clk_hz = clk_get_rate(mdata->spi_clk);
- if (speed_hz < spi_clk_hz / 2)
- div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
+ if (speed_hz < mdata->spi_clk_hz / 2)
+ div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
 else
 div = 1;

@@ -388,13 +395,19 @@ static void mtk_spi_prepare_transfer(struct spi_master *master,
 cs_time = sck_time * 2;

 if (mdata->dev_comp->enhance_timing) {
- reg_val = (((sck_time - 1) & 0xffff)
+ reg_val = readl(mdata->base + SPI_CFG2_REG);
+ reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
+ reg_val |= (((sck_time - 1) & 0xffff)
 << SPI_CFG2_SCK_HIGH_OFFSET);
+ reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
 reg_val |= (((sck_time - 1) & 0xffff)
 << SPI_CFG2_SCK_LOW_OFFSET);
 writel(reg_val, mdata->base + SPI_CFG2_REG);
- reg_val = (((cs_time - 1) & 0xffff)
+ reg_val = readl(mdata->base + SPI_CFG0_REG);
+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+ reg_val |= (((cs_time - 1) & 0xffff)
 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
 reg_val |= (((cs_time - 1) & 0xffff)
 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
 writel(reg_val, mdata->base + SPI_CFG0_REG);
@@ -453,14 +466,17 @@ static void mtk_spi_enable_transfer(struct spi_master *master)
 writel(cmd, mdata->base + SPI_CMD_REG);
 }

-static int mtk_spi_get_mult_delta(u32 xfer_len)
+static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
 {
- u32 mult_delta;
+ u32 mult_delta = 0;

- if (xfer_len > MTK_SPI_PACKET_SIZE)
- mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
- else
- mult_delta = 0;
+ if (mdata->dev_comp->ipm_design) {
+ if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
+ mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
+ } else {
+ if (xfer_len > MTK_SPI_PACKET_SIZE)
+ mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
+ }

 return mult_delta;
 }
@@ -472,22 +488,22 @@ static void mtk_spi_update_mdata_len(struct spi_master *master)

 if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
 if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
- mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
 mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
 mdata->rx_sgl_len = mult_delta;
 mdata->tx_sgl_len -= mdata->xfer_len;
 } else {
- mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
 mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
 mdata->tx_sgl_len = mult_delta;
 mdata->rx_sgl_len -= mdata->xfer_len;
 }
 } else if (mdata->tx_sgl_len) {
- mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
 mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
 mdata->tx_sgl_len = mult_delta;
 } else if (mdata->rx_sgl_len) {
- mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
 mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
 mdata->rx_sgl_len = mult_delta;
 }
@@ -598,6 +614,19 @@ static int mtk_spi_transfer_one(struct spi_master *master,
 struct spi_device *spi,
 struct spi_transfer *xfer)
 {
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ u32 reg_val = 0;
+
+ /* prepare xfer direction and duplex mode */
+ if (mdata->dev_comp->ipm_design) {
+ if (!xfer->tx_buf || !xfer->rx_buf) {
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ if (xfer->rx_buf)
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ }
+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
+ }
+
 if (master->can_dma(master, spi, xfer))
 return mtk_spi_dma_transfer(master, spi, xfer);
 else
@@ -618,8 +647,9 @@ static int mtk_spi_setup(struct spi_device *spi)
 {
 struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

- if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
+ /* CS de-asserted, gpiolib will handle inversion */
+ gpiod_direction_output(spi->cs_gpiod, 0);

 return 0;
 }
@@ -747,9 +777,6 @@ static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
 {
 int opcode_len;

- if(!op->data.nbytes)
- return 0;
-
 if (op->data.dir != SPI_MEM_NO_DATA) {
 opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
 if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
@@ -765,8 +792,7 @@ static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
 static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
 const struct spi_mem_op *op)
 {
- if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
- op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
+ if (!spi_mem_default_supports_op(mem, op))
 return false;

 if (op->addr.nbytes && op->dummy.nbytes &&
@@ -814,13 +840,18 @@ static int mtk_spi_transfer_wait(struct spi_mem *mem,
 const struct spi_mem_op *op)
 {
 struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
- unsigned long long ms = 1;
+ /*
+ * For each byte we wait for 8 cycles of the SPI clock.
+ * Since speed is defined in Hz and we want milliseconds,
+ * so it should be 8 * 1000.
+ */
+ u64 ms = 8000LL;

 if (op->data.dir == SPI_MEM_NO_DATA)
- ms = 8LL * 1000LL * 32;
+ ms *= 32; /* prevent we may get 0 for short transfers. */
 else
- ms = 8LL * 1000LL * op->data.nbytes;
- do_div(ms, mem->spi->max_speed_hz);
+ ms *= op->data.nbytes;
+ ms = div_u64(ms, mem->spi->max_speed_hz);
 ms += ms + 1000; /* 1s tolerance */

 if (ms > UINT_MAX)
@@ -839,9 +870,8 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem,
 const struct spi_mem_op *op)
 {
 struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
- u32 reg_val, nio = 1, tx_size;
- char *tx_tmp_buf;
- char *rx_tmp_buf;
+ u32 reg_val, nio, tx_size;
+ char *tx_tmp_buf, *rx_tmp_buf;
 int ret = 0;

 mdata->use_spimem = true;
@@ -887,9 +917,11 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem,
 op->dummy.buswidth == 4 ||
 op->data.buswidth == 4)
 nio = 4;
+ else
+ nio = 1;

 reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
- reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
+ reg_val |= PIN_MODE_CFG(nio);

 reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
 if (op->data.dir == SPI_MEM_DATA_IN)
@@ -902,11 +934,13 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem,
 if (op->data.dir == SPI_MEM_DATA_OUT)
 tx_size += op->data.nbytes;

- tx_size = max(tx_size, (u32)32);
+ tx_size = max_t(u32, tx_size, 32);

 tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
- if (!tx_tmp_buf)
+ if (!tx_tmp_buf) {
+ mdata->use_spimem = false;
 return -ENOMEM;
+ }

 tx_tmp_buf[0] = op->cmd.opcode;

@@ -937,12 +971,15 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem,

 if (op->data.dir == SPI_MEM_DATA_IN) {
 if(!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
- rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL | GFP_DMA);
- if (!rx_tmp_buf)
- return -ENOMEM;
- }
- else
+ rx_tmp_buf = kzalloc(op->data.nbytes,
+ GFP_KERNEL | GFP_DMA);
+ if (!rx_tmp_buf) {
+ ret = -ENOMEM;
+ goto unmap_tx_dma;
+ }
+ } else {
 rx_tmp_buf = op->data.buf.in;
+ }

 mdata->rx_dma = dma_map_single(mdata->dev,
 rx_tmp_buf,
@@ -950,7 +987,7 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem,
 DMA_FROM_DEVICE);
 if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
 ret = -ENOMEM;
- goto unmap_tx_dma;
+ goto kfree_rx_tmp_buf;
 }
 }

@@ -980,11 +1017,13 @@ unmap_rx_dma:
 if (op->data.dir == SPI_MEM_DATA_IN) {
 dma_unmap_single(mdata->dev, mdata->rx_dma,
 op->data.nbytes, DMA_FROM_DEVICE);
- if(!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
+ if(!IS_ALIGNED((size_t)op->data.buf.in, 4))
 memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
- kfree(rx_tmp_buf);
- }
 }
+kfree_rx_tmp_buf:
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ !IS_ALIGNED((size_t)op->data.buf.in, 4))
+ kfree(rx_tmp_buf);
 unmap_tx_dma:
 dma_unmap_single(mdata->dev, mdata->tx_dma,
 tx_size, DMA_TO_DEVICE);
@@ -1003,19 +1042,19 @@ static const struct spi_controller_mem_ops mtk_spi_mem_ops = {

 static int mtk_spi_probe(struct platform_device *pdev)
 {
+ struct device *dev = &pdev->dev;
 struct spi_master *master;
 struct mtk_spi *mdata;
- const struct of_device_id *of_id;
 int i, irq, ret, addr_bits;

- master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
+ master = devm_spi_alloc_master(dev, sizeof(*mdata));
 if (!master) {
- dev_err(&pdev->dev, "failed to alloc spi master\n");
+ dev_err(dev, "failed to alloc spi master\n");
 return -ENOMEM;
 }

 master->auto_runtime_pm = true;
- master->dev.of_node = pdev->dev.of_node;
+ master->dev.of_node = dev->of_node;
 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

 master->set_cs = mtk_spi_set_cs;
@@ -1023,23 +1062,16 @@ static int mtk_spi_probe(struct platform_device *pdev)
 master->transfer_one = mtk_spi_transfer_one;
 master->can_dma = mtk_spi_can_dma;
 master->setup = mtk_spi_setup;
-
+ master->use_gpio_descriptors = true;
 master->append_caldata = mtk_spi_append_caldata;

- of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
- if (!of_id) {
- dev_err(&pdev->dev, "failed to probe of_node\n");
- ret = -EINVAL;
- goto err_put_master;
- }
-
 mdata = spi_master_get_devdata(master);

 /* Set device configs to default first. Calibrate it later. */
 mdata->dev_config.sample_sel = 0;
 mdata->dev_config.get_tick_dly = 2;

- mdata->dev_comp = of_id->data;
+ mdata->dev_comp = device_get_match_data(dev);

 if (mdata->dev_comp->enhance_timing)
 master->mode_bits |= SPI_CS_HIGH;
@@ -1050,27 +1082,23 @@ static int mtk_spi_probe(struct platform_device *pdev)
 if (mdata->dev_comp->ipm_design)
 master->mode_bits |= SPI_LOOP;

- if (mdata->dev_comp->support_quad) {
+ if (mdata->dev_comp->ipm_design) {
+ mdata->dev = dev;
 master->mem_ops = &mtk_spi_mem_ops;
- master->mode_bits |= SPI_RX_DUAL | SPI_TX_DUAL |
- SPI_RX_QUAD | SPI_TX_QUAD;
-
- mdata->dev = &pdev->dev;
 init_completion(&mdata->spimem_done);
 }

 if (mdata->dev_comp->need_pad_sel) {
- mdata->pad_num = of_property_count_u32_elems(
- pdev->dev.of_node,
+ mdata->pad_num = of_property_count_u32_elems(dev->of_node,
 "mediatek,pad-select");
 if (mdata->pad_num < 0) {
- dev_err(&pdev->dev,
+ dev_err(dev,
 "No 'mediatek,pad-select' property\n");
 ret = -EINVAL;
 goto err_put_master;
 }

- mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
+ mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
 sizeof(u32), GFP_KERNEL);
 if (!mdata->pad_sel) {
 ret = -ENOMEM;
@@ -1078,11 +1106,11 @@ static int mtk_spi_probe(struct platform_device *pdev)
 }

 for (i = 0; i < mdata->pad_num; i++) {
- of_property_read_u32_index(pdev->dev.of_node,
+ of_property_read_u32_index(dev->of_node,
 "mediatek,pad-select",
 i, &mdata->pad_sel[i]);
 if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
- dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
+ dev_err(dev, "wrong pad-sel[%d]: %u\n",
 i, mdata->pad_sel[i]);
 ret = -EINVAL;
 goto err_put_master;
@@ -1103,122 +1131,118 @@ static int mtk_spi_probe(struct platform_device *pdev)
 goto err_put_master;
 }

- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ if (mdata->dev_comp->ipm_design)
+ dma_set_max_seg_size(dev, SZ_16M);
+ else
+ dma_set_max_seg_size(dev, SZ_256K);

- ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
- IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
+ ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+ IRQF_TRIGGER_NONE, dev_name(dev), master);
 if (ret) {
- dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
+ dev_err(dev, "failed to register irq (%d)\n", ret);
 goto err_put_master;
 }


- mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
+ mdata->parent_clk = devm_clk_get(dev, "parent-clk");
 if (IS_ERR(mdata->parent_clk)) {
 ret = PTR_ERR(mdata->parent_clk);
- dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
+ dev_err(dev, "failed to get parent-clk: %d\n", ret);
 goto err_put_master;
 }

- mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
+ mdata->sel_clk = devm_clk_get(dev, "sel-clk");
 if (IS_ERR(mdata->sel_clk)) {
 ret = PTR_ERR(mdata->sel_clk);
- dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
+ dev_err(dev, "failed to get sel-clk: %d\n", ret);
 goto err_put_master;
 }

- mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
+ mdata->spi_clk = devm_clk_get(dev, "spi-clk");
 if (IS_ERR(mdata->spi_clk)) {
 ret = PTR_ERR(mdata->spi_clk);
- dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
+ dev_err(dev, "failed to get spi-clk: %d\n", ret);
 goto err_put_master;
 }

- if (mdata->dev_comp->need_ahb_clk) {
- mdata->spi_hclk = devm_clk_get(&pdev->dev, "spi-hclk");
- if (IS_ERR(mdata->spi_hclk)) {
- ret = PTR_ERR(mdata->spi_hclk);
- dev_err(&pdev->dev, "failed to get spi-hclk: %d\n", ret);
- goto err_put_master;
- }
-
- ret = clk_prepare_enable(mdata->spi_hclk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable spi_hclk (%d)\n", ret);
- goto err_put_master;
- }
+ mdata->spi_hclk = devm_clk_get_optional(dev, "spi-hclk");
+ if (IS_ERR(mdata->spi_hclk)) {
+ ret = PTR_ERR(mdata->spi_hclk);
+ dev_err(dev, "failed to get spi-hclk: %d\n", ret);
+ goto err_put_master;
 }

- ret = clk_prepare_enable(mdata->spi_clk);
+ ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
 if (ret < 0) {
- dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
+ dev_err(dev, "failed to clk_set_parent (%d)\n", ret);
 goto err_put_master;
 }
-
- ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
 if (ret < 0) {
- dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
- clk_disable_unprepare(mdata->spi_clk);
+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
 goto err_put_master;
 }

- clk_disable_unprepare(mdata->spi_clk);
-
- if (mdata->dev_comp->need_ahb_clk)
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
 clk_disable_unprepare(mdata->spi_hclk);
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ goto err_put_master;
+ }

- pm_runtime_enable(&pdev->dev);
+ mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

- ret = devm_spi_register_master(&pdev->dev, master);
- if (ret) {
- dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
- goto err_disable_runtime_pm;
+ if (mdata->dev_comp->no_need_unprepare) {
+ clk_disable(mdata->spi_clk);
+ clk_disable(mdata->spi_hclk);
+ } else {
+ clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
 }

 if (mdata->dev_comp->need_pad_sel) {
 if (mdata->pad_num != master->num_chipselect) {
- dev_err(&pdev->dev,
+ dev_err(dev,
 "pad_num does not match num_chipselect(%d != %d)\n",
 mdata->pad_num, master->num_chipselect);
 ret = -EINVAL;
- goto err_disable_runtime_pm;
+ goto err_put_master;
 }

- if (!master->cs_gpios && master->num_chipselect > 1) {
- dev_err(&pdev->dev,
+ if (!master->cs_gpiods && master->num_chipselect > 1) {
+ dev_err(dev,
 "cs_gpios not specified and num_chipselect > 1\n");
 ret = -EINVAL;
- goto err_disable_runtime_pm;
+ goto err_put_master;
 }

- if (master->cs_gpios) {
- for (i = 0; i < master->num_chipselect; i++) {
- ret = devm_gpio_request(&pdev->dev,
- master->cs_gpios[i],
- dev_name(&pdev->dev));
- if (ret) {
- dev_err(&pdev->dev,
- "can't get CS GPIO %i\n", i);
- goto err_disable_runtime_pm;
- }
- }
- }
 }

 if (mdata->dev_comp->dma_ext)
 addr_bits = DMA_ADDR_EXT_BITS;
 else
 addr_bits = DMA_ADDR_DEF_BITS;
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
+ ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
 if (ret)
- dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
+ dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
 addr_bits, ret);

+ pm_runtime_enable(dev);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "failed to register master (%d)\n", ret);
+ goto err_disable_runtime_pm;
+ }
+
 return 0;

 err_disable_runtime_pm:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
 err_put_master:
 spi_master_put(master);

@@ -1229,11 +1253,22 @@ static int mtk_spi_remove(struct platform_device *pdev)
 {
 struct spi_master *master = platform_get_drvdata(pdev);
 struct mtk_spi *mdata = spi_master_get_devdata(master);
+ int ret;

- pm_runtime_disable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;

 mtk_spi_reset(mdata);

+ if (mdata->dev_comp->no_need_unprepare) {
+ clk_unprepare(mdata->spi_clk);
+ clk_unprepare(mdata->spi_hclk);
+ }
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
 return 0;
 }

@@ -1250,8 +1285,7 @@ static int mtk_spi_suspend(struct device *dev)

 if (!pm_runtime_suspended(dev)) {
 clk_disable_unprepare(mdata->spi_clk);
- if (mdata->dev_comp->need_ahb_clk)
- clk_disable_unprepare(mdata->spi_hclk);
+ clk_disable_unprepare(mdata->spi_hclk);
 }

 return ret;
@@ -1264,26 +1298,24 @@ static int mtk_spi_resume(struct device *dev)
 struct mtk_spi *mdata = spi_master_get_devdata(master);

 if (!pm_runtime_suspended(dev)) {
- if (mdata->dev_comp->need_ahb_clk) {
- ret = clk_prepare_enable(mdata->spi_hclk);
- if (ret < 0) {
- dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
- return ret;
- }
- }
-
 ret = clk_prepare_enable(mdata->spi_clk);
 if (ret < 0) {
 dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
 return ret;
 }
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ return ret;
+ }
 }

 ret = spi_master_resume(master);
 if (ret < 0) {
 clk_disable_unprepare(mdata->spi_clk);
- if (mdata->dev_comp->need_ahb_clk)
- clk_disable_unprepare(mdata->spi_hclk);
+ clk_disable_unprepare(mdata->spi_hclk);
 }

 return ret;
@@ -1296,10 +1328,13 @@ static int mtk_spi_runtime_suspend(struct device *dev)
 struct spi_master *master = dev_get_drvdata(dev);
 struct mtk_spi *mdata = spi_master_get_devdata(master);

- clk_disable_unprepare(mdata->spi_clk);
-
- if (mdata->dev_comp->need_ahb_clk)
+ if (mdata->dev_comp->no_need_unprepare) {
+ clk_disable(mdata->spi_clk);
+ clk_disable(mdata->spi_hclk);
+ } else {
+ clk_disable_unprepare(mdata->spi_clk);
 clk_disable_unprepare(mdata->spi_hclk);
+ }

 return 0;
 }
@@ -1310,18 +1345,31 @@ static int mtk_spi_runtime_resume(struct device *dev)
 struct mtk_spi *mdata = spi_master_get_devdata(master);
 int ret;

- if (mdata->dev_comp->need_ahb_clk) {
- ret = clk_prepare_enable(mdata->spi_hclk);
+ if (mdata->dev_comp->no_need_unprepare) {
+ ret = clk_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+ ret = clk_enable(mdata->spi_hclk);
 if (ret < 0) {
 dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+ clk_disable(mdata->spi_clk);
+ return ret;
+ }
+ } else {
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
 return ret;
 }
- }

- ret = clk_prepare_enable(mdata->spi_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
- return ret;
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ return ret;
+ }
 }

 return 0;
--
2.34.1
