From 675b477b2a50b2fb97f35944756f89644bf70092 Mon Sep 17 00:00:00 2001
From: Qii Wang <qii.wang@mediatek.com>
Date: Tue, 5 Jan 2021 16:48:39 +0800
Subject: [PATCH] spi: mediatek: support IPM Design

[Description]
1. support single mode;
2. support dual/quad mode with the spi-mem framework.
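
As an illustration only (not part of this patch): a spi-mem user, e.g. a
SPI-NOR/SPI-NAND layer, would describe a 1-1-4 quad read roughly as
below. The 0x6b opcode, the lengths and the buffer are hypothetical and
depend on the flash device; mtk_spi_mem_supports_op() accepts such an op
because every bus width is <= 4, addr+dummy fit in 16 bytes, and the RX
buffer is assumed 4-byte aligned:

	/* hypothetical spi-mem caller, for illustration */
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, 0x0, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(256, buf, 4));

	ret = spi_mem_exec_op(mem, &op);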

Signed-off-by: Leilk Liu <leilk.liu@mediatek.com>
Reviewed-by: Qii Wang <qii.wang@mediatek.com>
---
 drivers/spi/spi-mt65xx.c                  | 395 +++++++++++++++++++++--
 include/linux/platform_data/spi-mt65xx.h  |   2 +-
 2 files changed, 370 insertions(+), 27 deletions(-)

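Note on the platform data change below (a usage sketch under assumptions,
not part of this patch): the new get_tick_dly field is board tuning data
for the RX sampling delay, and a hypothetical board file would hand it to
the driver through the spi_device controller_data pointer:

	/* hypothetical board support code */
	static struct mtk_chip_config board_chip_config = {
		.sample_sel = 0,
		.get_tick_dly = 2,	/* assumed SoC-specific value */
	};

	spi_dev->controller_data = &board_chip_config;
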
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 8acf24f7c..9183c64e4 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -17,6 +17,7 @@
 #include <linux/platform_data/spi-mt65xx.h>
 #include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
 #include <linux/dma-mapping.h>
 
 #define SPI_CFG0_REG                      0x0000
@@ -31,6 +32,7 @@
 #define SPI_CFG2_REG                      0x0028
 #define SPI_TX_SRC_REG_64                 0x002c
 #define SPI_RX_DST_REG_64                 0x0030
+#define SPI_CFG3_IPM_REG                  0x0040
 
 #define SPI_CFG0_SCK_HIGH_OFFSET          0
 #define SPI_CFG0_SCK_LOW_OFFSET           8
@@ -42,13 +44,15 @@
 #define SPI_CFG1_CS_IDLE_OFFSET           0
 #define SPI_CFG1_PACKET_LOOP_OFFSET       8
 #define SPI_CFG1_PACKET_LENGTH_OFFSET     16
-#define SPI_CFG1_GET_TICK_DLY_OFFSET      30
+#define SPI_CFG1_GET_TICKDLY_OFFSET       29
 
+#define SPI_CFG1_GET_TICKDLY_MASK         GENMASK(31, 29)
 #define SPI_CFG1_CS_IDLE_MASK             0xff
 #define SPI_CFG1_PACKET_LOOP_MASK         0xff00
 #define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000
+#define SPI_CFG1_IPM_PACKET_LENGTH_MASK   GENMASK(31, 16)
 #define SPI_CFG2_SCK_HIGH_OFFSET          0
-#define SPI_CFG2_SCK_LOW_OFFSET 16
+#define SPI_CFG2_SCK_LOW_OFFSET           16
 
 #define SPI_CMD_ACT                       BIT(0)
 #define SPI_CMD_RESUME                    BIT(1)
@@ -67,6 +71,25 @@
 #define SPI_CMD_TX_ENDIAN                 BIT(15)
 #define SPI_CMD_FINISH_IE                 BIT(16)
 #define SPI_CMD_PAUSE_IE                  BIT(17)
+#define SPI_CMD_IPM_NONIDLE_MODE          BIT(19)
+#define SPI_CMD_IPM_SPIM_LOOP             BIT(21)
+#define SPI_CMD_IPM_GET_TICKDLY_OFFSET    22
+
+#define SPI_CMD_IPM_GET_TICKDLY_MASK      GENMASK(24, 22)
+
+#define PIN_MODE_CFG(x)                   ((x) / 2)
+
+#define SPI_CFG3_IPM_PIN_MODE_OFFSET      0
+#define SPI_CFG3_IPM_HALF_DUPLEX_DIR      BIT(2)
+#define SPI_CFG3_IPM_HALF_DUPLEX_EN       BIT(3)
+#define SPI_CFG3_IPM_XMODE_EN             BIT(4)
+#define SPI_CFG3_IPM_NODATA_FLAG          BIT(5)
+#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET   8
+#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET  12
+
+#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK    GENMASK(1, 0)
+#define SPI_CFG3_IPM_CMD_BYTELEN_MASK     GENMASK(11, 8)
+#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK    GENMASK(15, 12)
 
 #define MT8173_SPI_MAX_PAD_SEL            3
 
@@ -77,6 +100,9 @@
 
 #define MTK_SPI_MAX_FIFO_SIZE 32U
 #define MTK_SPI_PACKET_SIZE 1024
+#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
+#define MTK_SPI_IPM_PACKET_LOOP SZ_256
+
 #define MTK_SPI_32BITS_MASK (0xffffffff)
 
 #define DMA_ADDR_EXT_BITS (36)
@@ -90,6 +116,9 @@ struct mtk_spi_compatible {
 	bool enhance_timing;
 	/* some IC support DMA addr extension */
 	bool dma_ext;
+	/* the IPM IP design improves some features and supports dual/quad mode */
+	bool ipm_design;
+	bool support_quad;
 };
 
 struct mtk_spi {
@@ -104,6 +133,12 @@ struct mtk_spi {
 	struct scatterlist *tx_sgl, *rx_sgl;
 	u32 tx_sgl_len, rx_sgl_len;
 	const struct mtk_spi_compatible *dev_comp;
+
+	struct completion spimem_done;
+	bool use_spimem;
+	struct device *dev;
+	dma_addr_t tx_dma;
+	dma_addr_t rx_dma;
 };
 
 static const struct mtk_spi_compatible mtk_common_compat;
@@ -112,6 +147,14 @@ static const struct mtk_spi_compatible mt2712_compat = {
 	.must_tx = true,
 };
 
+static const struct mtk_spi_compatible ipm_compat = {
+	.must_tx = true,
+	.enhance_timing = true,
+	.dma_ext = true,
+	.ipm_design = true,
+	.support_quad = true,
+};
+
 static const struct mtk_spi_compatible mt6765_compat = {
 	.need_pad_sel = true,
 	.must_tx = true,
@@ -140,11 +183,14 @@ static const struct mtk_spi_compatible mt8183_compat = {
  * supplies it.
  */
 static const struct mtk_chip_config mtk_default_chip_info = {
-	.cs_pol = 0,
 	.sample_sel = 0,
+	.get_tick_dly = 0,
 };
 
 static const struct of_device_id mtk_spi_of_match[] = {
+	{ .compatible = "mediatek,ipm-spi",
+		.data = (void *)&ipm_compat,
+	},
 	{ .compatible = "mediatek,mt2701-spi",
 		.data = (void *)&mtk_common_compat,
 	},
@@ -190,19 +236,48 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
 	writel(reg_val, mdata->base + SPI_CMD_REG);
 }
 
-static int mtk_spi_prepare_message(struct spi_master *master,
-				   struct spi_message *msg)
+static int mtk_spi_hw_init(struct spi_master *master,
+			   struct spi_device *spi)
 {
 	u16 cpha, cpol;
 	u32 reg_val;
-	struct spi_device *spi = msg->spi;
 	struct mtk_chip_config *chip_config = spi->controller_data;
 	struct mtk_spi *mdata = spi_master_get_devdata(master);
 
 	cpha = spi->mode & SPI_CPHA ? 1 : 0;
 	cpol = spi->mode & SPI_CPOL ? 1 : 0;
 
+	if (mdata->dev_comp->enhance_timing) {
+		if (mdata->dev_comp->ipm_design) {
+			/* CFG3 reg is only used by spi-mem,
+			 * so reset it to its default value here
+			 */
+			writel(0x0, mdata->base + SPI_CFG3_IPM_REG);
+
+			reg_val = readl(mdata->base + SPI_CMD_REG);
+			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
+			reg_val |= chip_config->get_tick_dly
+				   << SPI_CMD_IPM_GET_TICKDLY_OFFSET;
+			writel(reg_val, mdata->base + SPI_CMD_REG);
+		} else {
+			reg_val = readl(mdata->base + SPI_CFG1_REG);
+			reg_val &= ~SPI_CFG1_GET_TICKDLY_MASK;
+			reg_val |= chip_config->get_tick_dly
+				   << SPI_CFG1_GET_TICKDLY_OFFSET;
+			writel(reg_val, mdata->base + SPI_CFG1_REG);
+		}
+	}
+
 	reg_val = readl(mdata->base + SPI_CMD_REG);
+	if (mdata->dev_comp->ipm_design) {
+		/* SPI transfer without idle time until packet length done */
+		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
+		if (spi->mode & SPI_LOOP)
+			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
+		else
+			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
+	}
+
 	if (cpha)
 		reg_val |= SPI_CMD_CPHA;
 	else
@@ -231,10 +306,12 @@ static int mtk_spi_prepare_message(struct spi_master *master,
 #endif
 
 	if (mdata->dev_comp->enhance_timing) {
-		if (chip_config->cs_pol)
+		/* set CS polarity */
+		if (spi->mode & SPI_CS_HIGH)
 			reg_val |= SPI_CMD_CS_POL;
 		else
 			reg_val &= ~SPI_CMD_CS_POL;
+
 		if (chip_config->sample_sel)
 			reg_val |= SPI_CMD_SAMPLE_SEL;
 		else
@@ -260,11 +337,20 @@ static int mtk_spi_prepare_message(struct spi_master *master,
 	return 0;
 }
 
+static int mtk_spi_prepare_message(struct spi_master *master,
+				   struct spi_message *msg)
+{
+	return mtk_spi_hw_init(master, msg->spi);
+}
+
 static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
 {
 	u32 reg_val;
 	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
 
+	if (spi->mode & SPI_CS_HIGH)
+		enable = !enable;
+
 	reg_val = readl(mdata->base + SPI_CMD_REG);
 	if (!enable) {
 		reg_val |= SPI_CMD_PAUSE_EN;
@@ -278,14 +364,14 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
 }
 
 static void mtk_spi_prepare_transfer(struct spi_master *master,
-				     struct spi_transfer *xfer)
+				     u32 speed_hz)
 {
 	u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
 	struct mtk_spi *mdata = spi_master_get_devdata(master);
 
 	spi_clk_hz = clk_get_rate(mdata->spi_clk);
-	if (xfer->speed_hz < spi_clk_hz / 2)
-		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
+	if (speed_hz < spi_clk_hz / 2)
+		div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
 	else
 		div = 1;
 
@@ -323,12 +409,24 @@ static void mtk_spi_setup_packet(struct spi_master *master)
 	u32 packet_size, packet_loop, reg_val;
 	struct mtk_spi *mdata = spi_master_get_devdata(master);
 
-	packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
+	if (mdata->dev_comp->ipm_design)
+		packet_size = min_t(u32,
+				    mdata->xfer_len,
+				    MTK_SPI_IPM_PACKET_SIZE);
+	else
+		packet_size = min_t(u32,
+				    mdata->xfer_len,
+				    MTK_SPI_PACKET_SIZE);
+
 	packet_loop = mdata->xfer_len / packet_size;
 
 	reg_val = readl(mdata->base + SPI_CFG1_REG);
-	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
+	if (mdata->dev_comp->ipm_design)
+		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
+	else
+		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
 	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
+	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
 	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
 	writel(reg_val, mdata->base + SPI_CFG1_REG);
 }
@@ -423,7 +521,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
 	mdata->cur_transfer = xfer;
 	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
 	mdata->num_xfered = 0;
-	mtk_spi_prepare_transfer(master, xfer);
+	mtk_spi_prepare_transfer(master, xfer->speed_hz);
 	mtk_spi_setup_packet(master);
 
 	cnt = xfer->len / 4;
@@ -455,7 +553,7 @@ static int mtk_spi_dma_transfer(struct spi_master *master,
 	mdata->cur_transfer = xfer;
 	mdata->num_xfered = 0;
 
-	mtk_spi_prepare_transfer(master, xfer);
+	mtk_spi_prepare_transfer(master, xfer->speed_hz);
 
 	cmd = readl(mdata->base + SPI_CMD_REG);
 	if (xfer->tx_buf)
@@ -532,6 +630,13 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
 	else
 		mdata->state = MTK_SPI_IDLE;
 
+	/* SPI-MEM ops */
+	if (mdata->use_spimem) {
+		complete(&mdata->spimem_done);
+
+		return IRQ_HANDLED;
+	}
+
 	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
 		if (trans->rx_buf) {
 			cnt = mdata->xfer_len / 4;
@@ -615,12 +720,241 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
+				    const struct spi_mem_op *op)
+{
+	if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
+	    op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
+		return false;
+
+	if (op->addr.nbytes && op->dummy.nbytes &&
+	    op->addr.buswidth != op->dummy.buswidth)
+		return false;
+
+	if (op->addr.nbytes + op->dummy.nbytes > 16)
+		return false;
+
+	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
+		    MTK_SPI_IPM_PACKET_LOOP ||
+		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
+			return false;
+	}
+
+	if (op->data.dir == SPI_MEM_DATA_IN &&
+	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
+		return false;
+
+	return true;
+}
+
+static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
+				       const struct spi_mem_op *op)
+{
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
+	       mdata->base + SPI_TX_SRC_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	if (mdata->dev_comp->dma_ext)
+		writel((u32)(mdata->tx_dma >> 32),
+		       mdata->base + SPI_TX_SRC_REG_64);
+#endif
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
+		       mdata->base + SPI_RX_DST_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (mdata->dev_comp->dma_ext)
+			writel((u32)(mdata->rx_dma >> 32),
+			       mdata->base + SPI_RX_DST_REG_64);
+#endif
+	}
+}
+
+static int mtk_spi_transfer_wait(struct spi_mem *mem,
+				 const struct spi_mem_op *op)
+{
+	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+	unsigned long long ms = 1;
+
+	if (op->data.dir == SPI_MEM_NO_DATA)
+		ms = 8LL * 1000LL * 32;
+	else
+		ms = 8LL * 1000LL * op->data.nbytes;
+	do_div(ms, mem->spi->max_speed_hz);
+	ms += ms + 1000; /* 1s tolerance */
+
+	if (ms > UINT_MAX)
+		ms = UINT_MAX;
+
+	if (!wait_for_completion_timeout(&mdata->spimem_done,
+					 msecs_to_jiffies(ms))) {
+		dev_err(mdata->dev, "spi-mem transfer timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int mtk_spi_mem_exec_op(struct spi_mem *mem,
+			       const struct spi_mem_op *op)
+{
+	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+	u32 reg_val, nio = 1, tx_size;
+	char *tx_tmp_buf;
+	int ret = 0;
+
+	mdata->use_spimem = true;
+	reinit_completion(&mdata->spimem_done);
+
+	mtk_spi_reset(mdata);
+	mtk_spi_hw_init(mem->spi->master, mem->spi);
+	mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);
+
+	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
+	/* opcode byte len */
+	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
+	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
+
+	/* addr & dummy byte len */
+	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
+	if (op->addr.nbytes || op->dummy.nbytes)
+		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
+			   SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
+
+	/* data byte len */
+	if (op->data.dir == SPI_MEM_NO_DATA) {
+		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
+		writel(0, mdata->base + SPI_CFG1_REG);
+	} else {
+		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
+		mdata->xfer_len = op->data.nbytes;
+		mtk_spi_setup_packet(mem->spi->master);
+	}
+
+	if (op->addr.nbytes || op->dummy.nbytes) {
+		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
+			reg_val |= SPI_CFG3_IPM_XMODE_EN;
+		else
+			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
+	}
+
+	if (op->addr.buswidth == 2 ||
+	    op->dummy.buswidth == 2 ||
+	    op->data.buswidth == 2)
+		nio = 2;
+	else if (op->addr.buswidth == 4 ||
+		 op->dummy.buswidth == 4 ||
+		 op->data.buswidth == 4)
+		nio = 4;
+
+	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
+	reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
+
+	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+	else
+		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
+
+	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		tx_size += op->data.nbytes;
+
+	tx_size = max(tx_size, (u32)32);
+
+	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
+	if (!tx_tmp_buf)
+		return -ENOMEM;
+
+	tx_tmp_buf[0] = op->cmd.opcode;
+
+	if (op->addr.nbytes) {
+		int i;
+
+		for (i = 0; i < op->addr.nbytes; i++)
+			tx_tmp_buf[i + 1] = op->addr.val >>
+					(8 * (op->addr.nbytes - i - 1));
+	}
+
+	if (op->dummy.nbytes)
+		memset(tx_tmp_buf + op->addr.nbytes + 1,
+		       0xff,
+		       op->dummy.nbytes);
+
+	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
+		       op->data.buf.out,
+		       op->data.nbytes);
+
+	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
+				       tx_size, DMA_TO_DEVICE);
+	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
+		ret = -ENOMEM;
+		goto err_exit;
+	}
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		mdata->rx_dma = dma_map_single(mdata->dev,
+					       op->data.buf.in,
+					       op->data.nbytes,
+					       DMA_FROM_DEVICE);
+		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
+			ret = -ENOMEM;
+			goto unmap_tx_dma;
+		}
+	}
+
+	reg_val = readl(mdata->base + SPI_CMD_REG);
+	reg_val |= SPI_CMD_TX_DMA;
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		reg_val |= SPI_CMD_RX_DMA;
+	writel(reg_val, mdata->base + SPI_CMD_REG);
+
+	mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);
+
+	mtk_spi_enable_transfer(mem->spi->master);
+
+	/* Wait for the interrupt. */
+	ret = mtk_spi_transfer_wait(mem, op);
+	if (ret)
+		goto unmap_rx_dma;
+
+	/* spi disable dma */
+	reg_val = readl(mdata->base + SPI_CMD_REG);
+	reg_val &= ~SPI_CMD_TX_DMA;
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		reg_val &= ~SPI_CMD_RX_DMA;
+	writel(reg_val, mdata->base + SPI_CMD_REG);
+
+	/* on success, fall through to the common unmap path below */
+unmap_rx_dma:
+	/* only unmap rx_dma when it was actually mapped for this op */
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		dma_unmap_single(mdata->dev, mdata->rx_dma,
+				 op->data.nbytes, DMA_FROM_DEVICE);
+unmap_tx_dma:
+	dma_unmap_single(mdata->dev, mdata->tx_dma,
+			 tx_size, DMA_TO_DEVICE);
+err_exit:
+	kfree(tx_tmp_buf);
+	mdata->use_spimem = false;
+
+	return ret;
+}
+
+static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
+	.supports_op = mtk_spi_mem_supports_op,
+	.exec_op = mtk_spi_mem_exec_op,
+};
+
 static int mtk_spi_probe(struct platform_device *pdev)
 {
 	struct spi_master *master;
 	struct mtk_spi *mdata;
 	const struct of_device_id *of_id;
-	struct resource *res;
 	int i, irq, ret, addr_bits;
 
 	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
@@ -629,7 +963,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	master->auto_runtime_pm = true;
+//	master->auto_runtime_pm = true;
 	master->dev.of_node = pdev->dev.of_node;
 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
 
@@ -648,9 +982,25 @@ static int mtk_spi_probe(struct platform_device *pdev)
 
 	mdata = spi_master_get_devdata(master);
 	mdata->dev_comp = of_id->data;
+
+	if (mdata->dev_comp->enhance_timing)
+		master->mode_bits |= SPI_CS_HIGH;
+
 	if (mdata->dev_comp->must_tx)
 		master->flags = SPI_MASTER_MUST_TX;
 
+	if (mdata->dev_comp->ipm_design)
+		master->mode_bits |= SPI_LOOP;
+
+	if (mdata->dev_comp->support_quad) {
+		master->mem_ops = &mtk_spi_mem_ops;
+		master->mode_bits |= SPI_RX_DUAL | SPI_TX_DUAL |
+				     SPI_RX_QUAD | SPI_TX_QUAD;
+
+		mdata->dev = &pdev->dev;
+		init_completion(&mdata->spimem_done);
+	}
+
 	if (mdata->dev_comp->need_pad_sel) {
 		mdata->pad_num = of_property_count_u32_elems(
 			pdev->dev.of_node,
@@ -683,15 +1033,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
 	}
 
 	platform_set_drvdata(pdev, master);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -ENODEV;
-		dev_err(&pdev->dev, "failed to determine base address\n");
-		goto err_put_master;
-	}
-
-	mdata->base = devm_ioremap_resource(&pdev->dev, res);
+	mdata->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(mdata->base)) {
 		ret = PTR_ERR(mdata->base);
 		goto err_put_master;
@@ -713,6 +1055,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
 		goto err_put_master;
 	}
 
+/*
 	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
 	if (IS_ERR(mdata->parent_clk)) {
 		ret = PTR_ERR(mdata->parent_clk);
@@ -750,7 +1093,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
 	clk_disable_unprepare(mdata->spi_clk);
 
 	pm_runtime_enable(&pdev->dev);
-
+*/
 	ret = devm_spi_register_master(&pdev->dev, master);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
index f0e6d6483..fae9bc15c 100644
--- a/include/linux/platform_data/spi-mt65xx.h
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -11,7 +11,7 @@
 
 /* Board specific platform_data */
 struct mtk_chip_config {
-	u32 cs_pol;
 	u32 sample_sel;
+	u32 get_tick_dly;
 };
 #endif
-- 
2.17.1
