blob: 4900733bc6ed380ee0f3350b87b8088c0674acbe [file] [log] [blame]
developer161b0452023-03-20 11:07:42 +08001--- a/drivers/spi/spi-mt65xx.c
2+++ b/drivers/spi/spi-mt65xx.c
3@@ -12,7 +12,7 @@
4 #include <linux/ioport.h>
5 #include <linux/module.h>
6 #include <linux/of.h>
7-#include <linux/of_gpio.h>
8+#include <linux/gpio/consumer.h>
9 #include <linux/platform_device.h>
10 #include <linux/pm_runtime.h>
11 #include <linux/spi/spi.h>
12@@ -43,9 +43,11 @@
13 #define SPI_CFG1_CS_IDLE_OFFSET 0
14 #define SPI_CFG1_PACKET_LOOP_OFFSET 8
15 #define SPI_CFG1_PACKET_LENGTH_OFFSET 16
16-#define SPI_CFG1_GET_TICKDLY_OFFSET 29
17+#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
18+#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30
19
20-#define SPI_CFG1_GET_TICKDLY_MASK GENMASK(31, 29)
21+#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
22+#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000
23 #define SPI_CFG1_CS_IDLE_MASK 0xff
24 #define SPI_CFG1_PACKET_LOOP_MASK 0xff00
25 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
26@@ -78,7 +80,6 @@
27
28 #define PIN_MODE_CFG(x) ((x) / 2)
29
30-#define SPI_CFG3_IPM_PIN_MODE_OFFSET 0
31 #define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
32 #define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
33 #define SPI_CFG3_IPM_XMODE_EN BIT(4)
34@@ -94,14 +95,14 @@
35
36 #define MTK_SPI_PAUSE_INT_STATUS 0x2
37
38-#define MTK_SPI_IDLE 0
39-#define MTK_SPI_PAUSED 1
40-
41 #define MTK_SPI_MAX_FIFO_SIZE 32U
42 #define MTK_SPI_PACKET_SIZE 1024
43 #define MTK_SPI_IPM_PACKET_SIZE SZ_64K
44 #define MTK_SPI_IPM_PACKET_LOOP SZ_256
45
46+#define MTK_SPI_IDLE 0
47+#define MTK_SPI_PAUSED 1
48+
49 #define MTK_SPI_32BITS_MASK (0xffffffff)
50
51 #define DMA_ADDR_EXT_BITS (36)
52@@ -115,11 +116,8 @@ struct mtk_spi_compatible {
53 bool enhance_timing;
54 /* some IC support DMA addr extension */
55 bool dma_ext;
56- /* the IPM IP design improve some feature, and support dual/quad mode */
57+ bool no_need_unprepare;
58 bool ipm_design;
59- bool support_quad;
60- /* some IC ahb & apb clk is different and also need to be enabled */
61- bool need_ahb_clk;
62 };
63
64 struct mtk_spi_config {
65@@ -140,7 +138,7 @@ struct mtk_spi {
66 u32 tx_sgl_len, rx_sgl_len;
67 const struct mtk_spi_compatible *dev_comp;
68 struct mtk_spi_config dev_config;
69-
70+ u32 spi_clk_hz;
71 struct completion spimem_done;
72 bool use_spimem;
73 struct device *dev;
74@@ -154,21 +152,10 @@ static const struct mtk_spi_compatible m
75 .must_tx = true,
76 };
77
78-static const struct mtk_spi_compatible ipm_compat_single = {
79- .must_tx = true,
80+static const struct mtk_spi_compatible mtk_ipm_compat = {
81 .enhance_timing = true,
82 .dma_ext = true,
83 .ipm_design = true,
84- .need_ahb_clk = true,
85-};
86-
87-static const struct mtk_spi_compatible ipm_compat_quad = {
88- .must_tx = true,
89- .enhance_timing = true,
90- .dma_ext = true,
91- .ipm_design = true,
92- .support_quad = true,
93- .need_ahb_clk = true,
94 };
95
96 static const struct mtk_spi_compatible mt6765_compat = {
97@@ -194,13 +181,25 @@ static const struct mtk_spi_compatible m
98 .enhance_timing = true,
99 };
100
101+static const struct mtk_spi_compatible mt6893_compat = {
102+ .need_pad_sel = true,
103+ .must_tx = true,
104+ .enhance_timing = true,
105+ .dma_ext = true,
106+ .no_need_unprepare = true,
107+};
108+
109 static const struct of_device_id mtk_spi_of_match[] = {
110+ { .compatible = "mediatek,spi-ipm",
111+ .data = (void *)&mtk_ipm_compat,
112+ },
113 { .compatible = "mediatek,ipm-spi-single",
114- .data = (void *)&ipm_compat_single,
115+ .data = (void *)&mtk_ipm_compat,
116 },
117 { .compatible = "mediatek,ipm-spi-quad",
118- .data = (void *)&ipm_compat_quad,
119+ .data = (void *)&mtk_ipm_compat,
120 },
121+
122 { .compatible = "mediatek,mt2701-spi",
123 .data = (void *)&mtk_common_compat,
124 },
125@@ -228,6 +227,12 @@ static const struct of_device_id mtk_spi
126 { .compatible = "mediatek,mt8183-spi",
127 .data = (void *)&mt8183_compat,
128 },
129+ { .compatible = "mediatek,mt8192-spi",
130+ .data = (void *)&mt6765_compat,
131+ },
132+ { .compatible = "mediatek,mt6893-spi",
133+ .data = (void *)&mt6893_compat,
134+ },
135 {}
136 };
137 MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
138@@ -256,27 +261,30 @@ static int mtk_spi_hw_init(struct spi_ma
139 cpha = spi->mode & SPI_CPHA ? 1 : 0;
140 cpol = spi->mode & SPI_CPOL ? 1 : 0;
141
142+ /* tick delay */
143 if (mdata->dev_comp->enhance_timing) {
144 if (mdata->dev_comp->ipm_design) {
145- /* CFG3 reg only used for spi-mem,
146- * here write to default value
147- */
148- writel(0x0, mdata->base + SPI_CFG3_IPM_REG);
149-
150 reg_val = readl(mdata->base + SPI_CMD_REG);
151 reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
152- reg_val |= mdata->dev_config.get_tick_dly
153- << SPI_CMD_IPM_GET_TICKDLY_OFFSET;
154+ reg_val |= ((mdata->dev_config.get_tick_dly & 0x7)
155+ << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
156 writel(reg_val, mdata->base + SPI_CMD_REG);
157 } else {
158 reg_val = readl(mdata->base + SPI_CFG1_REG);
159- reg_val &= ~SPI_CFG1_GET_TICKDLY_MASK;
160- reg_val |= mdata->dev_config.get_tick_dly
161- << SPI_CFG1_GET_TICKDLY_OFFSET;
162+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
163+ reg_val |= ((mdata->dev_config.get_tick_dly & 0x7)
164+ << SPI_CFG1_GET_TICK_DLY_OFFSET);
165 writel(reg_val, mdata->base + SPI_CFG1_REG);
166 }
167+ } else {
168+ reg_val = readl(mdata->base + SPI_CFG1_REG);
169+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
170+ reg_val |= ((mdata->dev_config.get_tick_dly & 0x3)
171+ << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
172+ writel(reg_val, mdata->base + SPI_CFG1_REG);
173 }
174
175+
176 reg_val = readl(mdata->base + SPI_CMD_REG);
177 if (mdata->dev_comp->ipm_design) {
178 /* SPI transfer without idle time until packet length done */
179@@ -375,12 +383,11 @@ static void mtk_spi_set_cs(struct spi_de
180 static void mtk_spi_prepare_transfer(struct spi_master *master,
181 u32 speed_hz)
182 {
183- u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
184+ u32 div, sck_time, cs_time, reg_val;
185 struct mtk_spi *mdata = spi_master_get_devdata(master);
186
187- spi_clk_hz = clk_get_rate(mdata->spi_clk);
188- if (speed_hz < spi_clk_hz / 2)
189- div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
190+ if (speed_hz < mdata->spi_clk_hz / 2)
191+ div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
192 else
193 div = 1;
194
195@@ -388,13 +395,19 @@ static void mtk_spi_prepare_transfer(str
196 cs_time = sck_time * 2;
197
198 if (mdata->dev_comp->enhance_timing) {
199- reg_val = (((sck_time - 1) & 0xffff)
200+ reg_val = readl(mdata->base + SPI_CFG2_REG);
201+ reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
202+ reg_val |= (((sck_time - 1) & 0xffff)
203 << SPI_CFG2_SCK_HIGH_OFFSET);
204+ reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
205 reg_val |= (((sck_time - 1) & 0xffff)
206 << SPI_CFG2_SCK_LOW_OFFSET);
207 writel(reg_val, mdata->base + SPI_CFG2_REG);
208- reg_val = (((cs_time - 1) & 0xffff)
209+ reg_val = readl(mdata->base + SPI_CFG0_REG);
210+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
211+ reg_val |= (((cs_time - 1) & 0xffff)
212 << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
213+ reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
214 reg_val |= (((cs_time - 1) & 0xffff)
215 << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
216 writel(reg_val, mdata->base + SPI_CFG0_REG);
217@@ -453,14 +466,17 @@ static void mtk_spi_enable_transfer(stru
218 writel(cmd, mdata->base + SPI_CMD_REG);
219 }
220
221-static int mtk_spi_get_mult_delta(u32 xfer_len)
222+static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
223 {
224- u32 mult_delta;
225+ u32 mult_delta = 0;
226
227- if (xfer_len > MTK_SPI_PACKET_SIZE)
228- mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
229- else
230- mult_delta = 0;
231+ if (mdata->dev_comp->ipm_design) {
232+ if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
233+ mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
234+ } else {
235+ if (xfer_len > MTK_SPI_PACKET_SIZE)
236+ mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
237+ }
238
239 return mult_delta;
240 }
241@@ -472,22 +488,22 @@ static void mtk_spi_update_mdata_len(str
242
243 if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
244 if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
245- mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
246+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
247 mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
248 mdata->rx_sgl_len = mult_delta;
249 mdata->tx_sgl_len -= mdata->xfer_len;
250 } else {
251- mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
252+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
253 mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
254 mdata->tx_sgl_len = mult_delta;
255 mdata->rx_sgl_len -= mdata->xfer_len;
256 }
257 } else if (mdata->tx_sgl_len) {
258- mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
259+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
260 mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
261 mdata->tx_sgl_len = mult_delta;
262 } else if (mdata->rx_sgl_len) {
263- mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
264+ mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
265 mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
266 mdata->rx_sgl_len = mult_delta;
267 }
268@@ -598,6 +614,19 @@ static int mtk_spi_transfer_one(struct s
269 struct spi_device *spi,
270 struct spi_transfer *xfer)
271 {
272+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
273+ u32 reg_val = 0;
274+
275+ /* prepare xfer direction and duplex mode */
276+ if (mdata->dev_comp->ipm_design) {
277+ if (!xfer->tx_buf || !xfer->rx_buf) {
278+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
279+ if (xfer->rx_buf)
280+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
281+ }
282+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
283+ }
284+
285 if (master->can_dma(master, spi, xfer))
286 return mtk_spi_dma_transfer(master, spi, xfer);
287 else
288@@ -618,8 +647,9 @@ static int mtk_spi_setup(struct spi_devi
289 {
290 struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
291
292- if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
293- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
294+ if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
295+ /* CS de-asserted, gpiolib will handle inversion */
296+ gpiod_direction_output(spi->cs_gpiod, 0);
297
298 return 0;
299 }
300@@ -747,9 +777,6 @@ static int mtk_spi_mem_adjust_op_size(st
301 {
302 int opcode_len;
303
304- if(!op->data.nbytes)
305- return 0;
306-
307 if (op->data.dir != SPI_MEM_NO_DATA) {
308 opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
309 if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
310@@ -765,8 +792,7 @@ static int mtk_spi_mem_adjust_op_size(st
311 static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
312 const struct spi_mem_op *op)
313 {
314- if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
315- op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
316+ if (!spi_mem_default_supports_op(mem, op))
317 return false;
318
319 if (op->addr.nbytes && op->dummy.nbytes &&
320@@ -814,13 +840,18 @@ static int mtk_spi_transfer_wait(struct
321 const struct spi_mem_op *op)
322 {
323 struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
324- unsigned long long ms = 1;
325+ /*
326+ * For each byte we wait for 8 cycles of the SPI clock.
327+ * Since speed is defined in Hz and we want milliseconds,
 328+ * it should be 8 * 1000.
329+ */
330+ u64 ms = 8000LL;
331
332 if (op->data.dir == SPI_MEM_NO_DATA)
333- ms = 8LL * 1000LL * 32;
 333+ ms *= 32; /* avoid getting 0 for short transfers. */
335 else
336- ms = 8LL * 1000LL * op->data.nbytes;
337- do_div(ms, mem->spi->max_speed_hz);
338+ ms *= op->data.nbytes;
339+ ms = div_u64(ms, mem->spi->max_speed_hz);
340 ms += ms + 1000; /* 1s tolerance */
341
342 if (ms > UINT_MAX)
343@@ -839,9 +870,8 @@ static int mtk_spi_mem_exec_op(struct sp
344 const struct spi_mem_op *op)
345 {
346 struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
347- u32 reg_val, nio = 1, tx_size;
348- char *tx_tmp_buf;
349- char *rx_tmp_buf;
350+ u32 reg_val, nio, tx_size;
351+ char *tx_tmp_buf, *rx_tmp_buf;
352 int ret = 0;
353
354 mdata->use_spimem = true;
355@@ -887,9 +917,11 @@ static int mtk_spi_mem_exec_op(struct sp
356 op->dummy.buswidth == 4 ||
357 op->data.buswidth == 4)
358 nio = 4;
359+ else
360+ nio = 1;
361
362 reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
363- reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
364+ reg_val |= PIN_MODE_CFG(nio);
365
366 reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
367 if (op->data.dir == SPI_MEM_DATA_IN)
368@@ -902,11 +934,13 @@ static int mtk_spi_mem_exec_op(struct sp
369 if (op->data.dir == SPI_MEM_DATA_OUT)
370 tx_size += op->data.nbytes;
371
372- tx_size = max(tx_size, (u32)32);
373+ tx_size = max_t(u32, tx_size, 32);
374
375 tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
376- if (!tx_tmp_buf)
377+ if (!tx_tmp_buf) {
378+ mdata->use_spimem = false;
379 return -ENOMEM;
380+ }
381
382 tx_tmp_buf[0] = op->cmd.opcode;
383
384@@ -937,12 +971,15 @@ static int mtk_spi_mem_exec_op(struct sp
385
386 if (op->data.dir == SPI_MEM_DATA_IN) {
387 if(!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
388- rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL | GFP_DMA);
389- if (!rx_tmp_buf)
390- return -ENOMEM;
391- }
392- else
393+ rx_tmp_buf = kzalloc(op->data.nbytes,
394+ GFP_KERNEL | GFP_DMA);
395+ if (!rx_tmp_buf) {
396+ ret = -ENOMEM;
397+ goto unmap_tx_dma;
398+ }
399+ } else {
400 rx_tmp_buf = op->data.buf.in;
401+ }
402
403 mdata->rx_dma = dma_map_single(mdata->dev,
404 rx_tmp_buf,
405@@ -950,7 +987,7 @@ static int mtk_spi_mem_exec_op(struct sp
406 DMA_FROM_DEVICE);
407 if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
408 ret = -ENOMEM;
409- goto unmap_tx_dma;
410+ goto kfree_rx_tmp_buf;
411 }
412 }
413
414@@ -980,11 +1017,13 @@ unmap_rx_dma:
415 if (op->data.dir == SPI_MEM_DATA_IN) {
416 dma_unmap_single(mdata->dev, mdata->rx_dma,
417 op->data.nbytes, DMA_FROM_DEVICE);
418- if(!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
419+ if(!IS_ALIGNED((size_t)op->data.buf.in, 4))
420 memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
421- kfree(rx_tmp_buf);
422- }
423 }
424+kfree_rx_tmp_buf:
425+ if (op->data.dir == SPI_MEM_DATA_IN &&
426+ !IS_ALIGNED((size_t)op->data.buf.in, 4))
427+ kfree(rx_tmp_buf);
428 unmap_tx_dma:
429 dma_unmap_single(mdata->dev, mdata->tx_dma,
430 tx_size, DMA_TO_DEVICE);
431@@ -1003,19 +1042,19 @@ static const struct spi_controller_mem_o
432
433 static int mtk_spi_probe(struct platform_device *pdev)
434 {
435+ struct device *dev = &pdev->dev;
436 struct spi_master *master;
437 struct mtk_spi *mdata;
438- const struct of_device_id *of_id;
439 int i, irq, ret, addr_bits;
440
441- master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
442+ master = devm_spi_alloc_master(dev, sizeof(*mdata));
443 if (!master) {
444- dev_err(&pdev->dev, "failed to alloc spi master\n");
445+ dev_err(dev, "failed to alloc spi master\n");
446 return -ENOMEM;
447 }
448
449 master->auto_runtime_pm = true;
450- master->dev.of_node = pdev->dev.of_node;
451+ master->dev.of_node = dev->of_node;
452 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
453
454 master->set_cs = mtk_spi_set_cs;
455@@ -1023,23 +1062,16 @@ static int mtk_spi_probe(struct platform
456 master->transfer_one = mtk_spi_transfer_one;
457 master->can_dma = mtk_spi_can_dma;
458 master->setup = mtk_spi_setup;
459-
460+ master->use_gpio_descriptors = true;
461 master->append_caldata = mtk_spi_append_caldata;
462
463- of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
464- if (!of_id) {
465- dev_err(&pdev->dev, "failed to probe of_node\n");
466- ret = -EINVAL;
467- goto err_put_master;
468- }
469-
470 mdata = spi_master_get_devdata(master);
471
472 /* Set device configs to default first. Calibrate it later. */
473 mdata->dev_config.sample_sel = 0;
474 mdata->dev_config.get_tick_dly = 2;
475
476- mdata->dev_comp = of_id->data;
477+ mdata->dev_comp = device_get_match_data(dev);
478
479 if (mdata->dev_comp->enhance_timing)
480 master->mode_bits |= SPI_CS_HIGH;
481@@ -1050,27 +1082,23 @@ static int mtk_spi_probe(struct platform
482 if (mdata->dev_comp->ipm_design)
483 master->mode_bits |= SPI_LOOP;
484
485- if (mdata->dev_comp->support_quad) {
486+ if (mdata->dev_comp->ipm_design) {
487+ mdata->dev = dev;
488 master->mem_ops = &mtk_spi_mem_ops;
489- master->mode_bits |= SPI_RX_DUAL | SPI_TX_DUAL |
490- SPI_RX_QUAD | SPI_TX_QUAD;
491-
492- mdata->dev = &pdev->dev;
493 init_completion(&mdata->spimem_done);
494 }
495
496 if (mdata->dev_comp->need_pad_sel) {
497- mdata->pad_num = of_property_count_u32_elems(
498- pdev->dev.of_node,
499+ mdata->pad_num = of_property_count_u32_elems(dev->of_node,
500 "mediatek,pad-select");
501 if (mdata->pad_num < 0) {
502- dev_err(&pdev->dev,
503+ dev_err(dev,
504 "No 'mediatek,pad-select' property\n");
505 ret = -EINVAL;
506 goto err_put_master;
507 }
508
509- mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
510+ mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
511 sizeof(u32), GFP_KERNEL);
512 if (!mdata->pad_sel) {
513 ret = -ENOMEM;
514@@ -1078,11 +1106,11 @@ static int mtk_spi_probe(struct platform
515 }
516
517 for (i = 0; i < mdata->pad_num; i++) {
518- of_property_read_u32_index(pdev->dev.of_node,
519+ of_property_read_u32_index(dev->of_node,
520 "mediatek,pad-select",
521 i, &mdata->pad_sel[i]);
522 if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
523- dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
524+ dev_err(dev, "wrong pad-sel[%d]: %u\n",
525 i, mdata->pad_sel[i]);
526 ret = -EINVAL;
527 goto err_put_master;
528@@ -1103,122 +1131,118 @@ static int mtk_spi_probe(struct platform
529 goto err_put_master;
530 }
531
532- if (!pdev->dev.dma_mask)
533- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
534+ if (!dev->dma_mask)
535+ dev->dma_mask = &dev->coherent_dma_mask;
536+
537+ if (mdata->dev_comp->ipm_design)
538+ dma_set_max_seg_size(dev, SZ_16M);
539+ else
540+ dma_set_max_seg_size(dev, SZ_256K);
541
542- ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
543- IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
544+ ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
545+ IRQF_TRIGGER_NONE, dev_name(dev), master);
546 if (ret) {
547- dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
548+ dev_err(dev, "failed to register irq (%d)\n", ret);
549 goto err_put_master;
550 }
551
552
553- mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
554+ mdata->parent_clk = devm_clk_get(dev, "parent-clk");
555 if (IS_ERR(mdata->parent_clk)) {
556 ret = PTR_ERR(mdata->parent_clk);
557- dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
558+ dev_err(dev, "failed to get parent-clk: %d\n", ret);
559 goto err_put_master;
560 }
561
562- mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
563+ mdata->sel_clk = devm_clk_get(dev, "sel-clk");
564 if (IS_ERR(mdata->sel_clk)) {
565 ret = PTR_ERR(mdata->sel_clk);
566- dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
567+ dev_err(dev, "failed to get sel-clk: %d\n", ret);
568 goto err_put_master;
569 }
570
571- mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
572+ mdata->spi_clk = devm_clk_get(dev, "spi-clk");
573 if (IS_ERR(mdata->spi_clk)) {
574 ret = PTR_ERR(mdata->spi_clk);
575- dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
576+ dev_err(dev, "failed to get spi-clk: %d\n", ret);
577 goto err_put_master;
578 }
579
580- if (mdata->dev_comp->need_ahb_clk) {
581- mdata->spi_hclk = devm_clk_get(&pdev->dev, "spi-hclk");
582- if (IS_ERR(mdata->spi_hclk)) {
583- ret = PTR_ERR(mdata->spi_hclk);
584- dev_err(&pdev->dev, "failed to get spi-hclk: %d\n", ret);
585- goto err_put_master;
586- }
587-
588- ret = clk_prepare_enable(mdata->spi_hclk);
589- if (ret < 0) {
590- dev_err(&pdev->dev, "failed to enable spi_hclk (%d)\n", ret);
591- goto err_put_master;
592- }
593+ mdata->spi_hclk = devm_clk_get_optional(dev, "spi-hclk");
594+ if (IS_ERR(mdata->spi_hclk)) {
595+ ret = PTR_ERR(mdata->spi_hclk);
596+ dev_err(dev, "failed to get spi-hclk: %d\n", ret);
597+ goto err_put_master;
598 }
599
600- ret = clk_prepare_enable(mdata->spi_clk);
601+ ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
602 if (ret < 0) {
603- dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
604+ dev_err(dev, "failed to clk_set_parent (%d)\n", ret);
605 goto err_put_master;
606 }
607-
608- ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
609+
610+ ret = clk_prepare_enable(mdata->spi_hclk);
611 if (ret < 0) {
612- dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
613- clk_disable_unprepare(mdata->spi_clk);
614+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
615 goto err_put_master;
616 }
617
618- clk_disable_unprepare(mdata->spi_clk);
619-
620- if (mdata->dev_comp->need_ahb_clk)
621+ ret = clk_prepare_enable(mdata->spi_clk);
622+ if (ret < 0) {
623 clk_disable_unprepare(mdata->spi_hclk);
624+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
625+ goto err_put_master;
626+ }
627
628- pm_runtime_enable(&pdev->dev);
629+ mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
630
631- ret = devm_spi_register_master(&pdev->dev, master);
632- if (ret) {
633- dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
634- goto err_disable_runtime_pm;
635+ if (mdata->dev_comp->no_need_unprepare) {
636+ clk_disable(mdata->spi_clk);
637+ clk_disable(mdata->spi_hclk);
638+ } else {
639+ clk_disable_unprepare(mdata->spi_clk);
640+ clk_disable_unprepare(mdata->spi_hclk);
641 }
642
643 if (mdata->dev_comp->need_pad_sel) {
644 if (mdata->pad_num != master->num_chipselect) {
645- dev_err(&pdev->dev,
646+ dev_err(dev,
647 "pad_num does not match num_chipselect(%d != %d)\n",
648 mdata->pad_num, master->num_chipselect);
649 ret = -EINVAL;
650- goto err_disable_runtime_pm;
651+ goto err_put_master;
652 }
653
654- if (!master->cs_gpios && master->num_chipselect > 1) {
655- dev_err(&pdev->dev,
656+ if (!master->cs_gpiods && master->num_chipselect > 1) {
657+ dev_err(dev,
658 "cs_gpios not specified and num_chipselect > 1\n");
659 ret = -EINVAL;
660- goto err_disable_runtime_pm;
661+ goto err_put_master;
662 }
663
664- if (master->cs_gpios) {
665- for (i = 0; i < master->num_chipselect; i++) {
666- ret = devm_gpio_request(&pdev->dev,
667- master->cs_gpios[i],
668- dev_name(&pdev->dev));
669- if (ret) {
670- dev_err(&pdev->dev,
671- "can't get CS GPIO %i\n", i);
672- goto err_disable_runtime_pm;
673- }
674- }
675- }
676 }
677
678 if (mdata->dev_comp->dma_ext)
679 addr_bits = DMA_ADDR_EXT_BITS;
680 else
681 addr_bits = DMA_ADDR_DEF_BITS;
682- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
683+ ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
684 if (ret)
685- dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
686+ dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
687 addr_bits, ret);
688
689+ pm_runtime_enable(dev);
690+
691+ ret = devm_spi_register_master(dev, master);
692+ if (ret) {
693+ dev_err(dev, "failed to register master (%d)\n", ret);
694+ goto err_disable_runtime_pm;
695+ }
696+
697 return 0;
698
699 err_disable_runtime_pm:
700- pm_runtime_disable(&pdev->dev);
701+ pm_runtime_disable(dev);
702 err_put_master:
703 spi_master_put(master);
704
705@@ -1229,11 +1253,22 @@ static int mtk_spi_remove(struct platfor
706 {
707 struct spi_master *master = platform_get_drvdata(pdev);
708 struct mtk_spi *mdata = spi_master_get_devdata(master);
709+ int ret;
710
711- pm_runtime_disable(&pdev->dev);
712+ ret = pm_runtime_resume_and_get(&pdev->dev);
713+ if (ret < 0)
714+ return ret;
715
716 mtk_spi_reset(mdata);
717
718+ if (mdata->dev_comp->no_need_unprepare) {
719+ clk_unprepare(mdata->spi_clk);
720+ clk_unprepare(mdata->spi_hclk);
721+ }
722+
723+ pm_runtime_put_noidle(&pdev->dev);
724+ pm_runtime_disable(&pdev->dev);
725+
726 return 0;
727 }
728
729@@ -1250,8 +1285,7 @@ static int mtk_spi_suspend(struct device
730
731 if (!pm_runtime_suspended(dev)) {
732 clk_disable_unprepare(mdata->spi_clk);
733- if (mdata->dev_comp->need_ahb_clk)
734- clk_disable_unprepare(mdata->spi_hclk);
735+ clk_disable_unprepare(mdata->spi_hclk);
736 }
737
738 return ret;
739@@ -1264,26 +1298,24 @@ static int mtk_spi_resume(struct device
740 struct mtk_spi *mdata = spi_master_get_devdata(master);
741
742 if (!pm_runtime_suspended(dev)) {
743- if (mdata->dev_comp->need_ahb_clk) {
744- ret = clk_prepare_enable(mdata->spi_hclk);
745- if (ret < 0) {
746- dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
747- return ret;
748- }
749- }
750-
751 ret = clk_prepare_enable(mdata->spi_clk);
752 if (ret < 0) {
753 dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
754 return ret;
755 }
756+
757+ ret = clk_prepare_enable(mdata->spi_hclk);
758+ if (ret < 0) {
759+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
760+ clk_disable_unprepare(mdata->spi_clk);
761+ return ret;
762+ }
763 }
764
765 ret = spi_master_resume(master);
766 if (ret < 0) {
767 clk_disable_unprepare(mdata->spi_clk);
768- if (mdata->dev_comp->need_ahb_clk)
769- clk_disable_unprepare(mdata->spi_hclk);
770+ clk_disable_unprepare(mdata->spi_hclk);
771 }
772
773 return ret;
774@@ -1296,10 +1328,13 @@ static int mtk_spi_runtime_suspend(struc
775 struct spi_master *master = dev_get_drvdata(dev);
776 struct mtk_spi *mdata = spi_master_get_devdata(master);
777
778- clk_disable_unprepare(mdata->spi_clk);
779-
780- if (mdata->dev_comp->need_ahb_clk)
781+ if (mdata->dev_comp->no_need_unprepare) {
782+ clk_disable(mdata->spi_clk);
783+ clk_disable(mdata->spi_hclk);
784+ } else {
785+ clk_disable_unprepare(mdata->spi_clk);
786 clk_disable_unprepare(mdata->spi_hclk);
787+ }
788
789 return 0;
790 }
791@@ -1310,18 +1345,31 @@ static int mtk_spi_runtime_resume(struct
792 struct mtk_spi *mdata = spi_master_get_devdata(master);
793 int ret;
794
795- if (mdata->dev_comp->need_ahb_clk) {
796- ret = clk_prepare_enable(mdata->spi_hclk);
797+ if (mdata->dev_comp->no_need_unprepare) {
798+ ret = clk_enable(mdata->spi_clk);
799+ if (ret < 0) {
800+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
801+ return ret;
802+ }
803+ ret = clk_enable(mdata->spi_hclk);
804 if (ret < 0) {
805 dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
806+ clk_disable(mdata->spi_clk);
807+ return ret;
808+ }
809+ } else {
810+ ret = clk_prepare_enable(mdata->spi_clk);
811+ if (ret < 0) {
812+ dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
813 return ret;
814 }
815- }
816
817- ret = clk_prepare_enable(mdata->spi_clk);
818- if (ret < 0) {
819- dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
820- return ret;
821+ ret = clk_prepare_enable(mdata->spi_hclk);
822+ if (ret < 0) {
823+ dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
824+ clk_disable_unprepare(mdata->spi_clk);
825+ return ret;
826+ }
827 }
828
829 return 0;