blob: 2d2aeaa000d3155a3aac25c26da6e305f4a5edd1 [file] [log] [blame]
developer5d148cb2023-06-02 13:08:11 +08001From 85e3059aee9943eddfd2b7c9fc83481751005c09 Mon Sep 17 00:00:00 2001
2From: Sam Shih <sam.shih@mediatek.com>
3Date: Fri, 2 Jun 2023 13:06:18 +0800
4Subject: [PATCH]
5 [spi-and-storage][999-2361-add-spimem-support-to-mtk-spi.patch]
developerfd40db22021-04-29 10:08:25 +08006
developerfd40db22021-04-29 10:08:25 +08007---
8 drivers/spi/spi-mt65xx.c | 395 +++++++++++++++++++++--
9 include/linux/platform_data/spi-mt65xx.h | 2 +-
10 2 files changed, 370 insertions(+), 27 deletions(-)
11
12diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
developer5d148cb2023-06-02 13:08:11 +080013index 29d44f5d5..dbb471769 100644
developerfd40db22021-04-29 10:08:25 +080014--- a/drivers/spi/spi-mt65xx.c
15+++ b/drivers/spi/spi-mt65xx.c
16@@ -17,6 +17,7 @@
17 #include <linux/platform_data/spi-mt65xx.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/spi/spi.h>
20+#include <linux/spi/spi-mem.h>
21 #include <linux/dma-mapping.h>
22
23 #define SPI_CFG0_REG 0x0000
24@@ -31,6 +32,7 @@
25 #define SPI_CFG2_REG 0x0028
26 #define SPI_TX_SRC_REG_64 0x002c
27 #define SPI_RX_DST_REG_64 0x0030
28+#define SPI_CFG3_IPM_REG 0x0040
29
30 #define SPI_CFG0_SCK_HIGH_OFFSET 0
31 #define SPI_CFG0_SCK_LOW_OFFSET 8
32@@ -42,13 +44,15 @@
33 #define SPI_CFG1_CS_IDLE_OFFSET 0
34 #define SPI_CFG1_PACKET_LOOP_OFFSET 8
35 #define SPI_CFG1_PACKET_LENGTH_OFFSET 16
36-#define SPI_CFG1_GET_TICK_DLY_OFFSET 30
37+#define SPI_CFG1_GET_TICKDLY_OFFSET 29
38
39+#define SPI_CFG1_GET_TICKDLY_MASK GENMASK(31, 29)
40 #define SPI_CFG1_CS_IDLE_MASK 0xff
41 #define SPI_CFG1_PACKET_LOOP_MASK 0xff00
42 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
43+#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
44 #define SPI_CFG2_SCK_HIGH_OFFSET 0
45-#define SPI_CFG2_SCK_LOW_OFFSET 16
46+#define SPI_CFG2_SCK_LOW_OFFSET 16
47
48 #define SPI_CMD_ACT BIT(0)
49 #define SPI_CMD_RESUME BIT(1)
50@@ -67,6 +71,25 @@
51 #define SPI_CMD_TX_ENDIAN BIT(15)
52 #define SPI_CMD_FINISH_IE BIT(16)
53 #define SPI_CMD_PAUSE_IE BIT(17)
54+#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
55+#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
56+#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
57+
58+#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
59+
60+#define PIN_MODE_CFG(x) ((x) / 2)
61+
62+#define SPI_CFG3_IPM_PIN_MODE_OFFSET 0
63+#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
64+#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
65+#define SPI_CFG3_IPM_XMODE_EN BIT(4)
66+#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
67+#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
68+#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
69+
70+#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
71+#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
72+#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
73
74 #define MT8173_SPI_MAX_PAD_SEL 3
75
76@@ -77,6 +100,9 @@
77
78 #define MTK_SPI_MAX_FIFO_SIZE 32U
79 #define MTK_SPI_PACKET_SIZE 1024
80+#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
81+#define MTK_SPI_IPM_PACKET_LOOP SZ_256
82+
83 #define MTK_SPI_32BITS_MASK (0xffffffff)
84
85 #define DMA_ADDR_EXT_BITS (36)
86@@ -90,6 +116,9 @@ struct mtk_spi_compatible {
87 bool enhance_timing;
88 /* some IC support DMA addr extension */
89 bool dma_ext;
90+ /* the IPM IP design improve some feature, and support dual/quad mode */
91+ bool ipm_design;
92+ bool support_quad;
93 };
94
95 struct mtk_spi {
96@@ -104,6 +133,12 @@ struct mtk_spi {
97 struct scatterlist *tx_sgl, *rx_sgl;
98 u32 tx_sgl_len, rx_sgl_len;
99 const struct mtk_spi_compatible *dev_comp;
100+
101+ struct completion spimem_done;
102+ bool use_spimem;
103+ struct device *dev;
104+ dma_addr_t tx_dma;
105+ dma_addr_t rx_dma;
106 };
107
108 static const struct mtk_spi_compatible mtk_common_compat;
109@@ -112,6 +147,14 @@ static const struct mtk_spi_compatible mt2712_compat = {
110 .must_tx = true,
111 };
112
113+static const struct mtk_spi_compatible ipm_compat = {
114+ .must_tx = true,
115+ .enhance_timing = true,
116+ .dma_ext = true,
117+ .ipm_design = true,
118+ .support_quad = true,
119+};
120+
121 static const struct mtk_spi_compatible mt6765_compat = {
122 .need_pad_sel = true,
123 .must_tx = true,
124@@ -140,11 +183,14 @@ static const struct mtk_spi_compatible mt8183_compat = {
125 * supplies it.
126 */
127 static const struct mtk_chip_config mtk_default_chip_info = {
128- .cs_pol = 0,
129 .sample_sel = 0,
130+ .get_tick_dly = 0,
131 };
132
133 static const struct of_device_id mtk_spi_of_match[] = {
134+ { .compatible = "mediatek,ipm-spi",
135+ .data = (void *)&ipm_compat,
136+ },
137 { .compatible = "mediatek,mt2701-spi",
138 .data = (void *)&mtk_common_compat,
139 },
140@@ -190,19 +236,48 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
141 writel(reg_val, mdata->base + SPI_CMD_REG);
142 }
143
144-static int mtk_spi_prepare_message(struct spi_master *master,
145- struct spi_message *msg)
146+static int mtk_spi_hw_init(struct spi_master *master,
147+ struct spi_device *spi)
148 {
149 u16 cpha, cpol;
150 u32 reg_val;
151- struct spi_device *spi = msg->spi;
152 struct mtk_chip_config *chip_config = spi->controller_data;
153 struct mtk_spi *mdata = spi_master_get_devdata(master);
154
155 cpha = spi->mode & SPI_CPHA ? 1 : 0;
156 cpol = spi->mode & SPI_CPOL ? 1 : 0;
157
158+ if (mdata->dev_comp->enhance_timing) {
159+ if (mdata->dev_comp->ipm_design) {
160+ /* CFG3 reg only used for spi-mem,
161+ * here write to default value
162+ */
163+ writel(0x0, mdata->base + SPI_CFG3_IPM_REG);
164+
165+ reg_val = readl(mdata->base + SPI_CMD_REG);
166+ reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
167+ reg_val |= chip_config->get_tick_dly
168+ << SPI_CMD_IPM_GET_TICKDLY_OFFSET;
169+ writel(reg_val, mdata->base + SPI_CMD_REG);
170+ } else {
171+ reg_val = readl(mdata->base + SPI_CFG1_REG);
172+ reg_val &= ~SPI_CFG1_GET_TICKDLY_MASK;
173+ reg_val |= chip_config->get_tick_dly
174+ << SPI_CFG1_GET_TICKDLY_OFFSET;
175+ writel(reg_val, mdata->base + SPI_CFG1_REG);
176+ }
177+ }
178+
179 reg_val = readl(mdata->base + SPI_CMD_REG);
180+ if (mdata->dev_comp->ipm_design) {
181+ /* SPI transfer without idle time until packet length done */
182+ reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
183+ if (spi->mode & SPI_LOOP)
184+ reg_val |= SPI_CMD_IPM_SPIM_LOOP;
185+ else
186+ reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
187+ }
188+
189 if (cpha)
190 reg_val |= SPI_CMD_CPHA;
191 else
192@@ -231,10 +306,12 @@ static int mtk_spi_prepare_message(struct spi_master *master,
193 #endif
194
195 if (mdata->dev_comp->enhance_timing) {
196- if (chip_config->cs_pol)
197+ /* set CS polarity */
198+ if (spi->mode & SPI_CS_HIGH)
199 reg_val |= SPI_CMD_CS_POL;
200 else
201 reg_val &= ~SPI_CMD_CS_POL;
202+
203 if (chip_config->sample_sel)
204 reg_val |= SPI_CMD_SAMPLE_SEL;
205 else
206@@ -260,11 +337,20 @@ static int mtk_spi_prepare_message(struct spi_master *master,
207 return 0;
208 }
209
210+static int mtk_spi_prepare_message(struct spi_master *master,
211+ struct spi_message *msg)
212+{
213+ return mtk_spi_hw_init(master, msg->spi);
214+}
215+
216 static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
217 {
218 u32 reg_val;
219 struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
220
221+ if (spi->mode & SPI_CS_HIGH)
222+ enable = !enable;
223+
224 reg_val = readl(mdata->base + SPI_CMD_REG);
225 if (!enable) {
226 reg_val |= SPI_CMD_PAUSE_EN;
227@@ -278,14 +364,14 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
228 }
229
230 static void mtk_spi_prepare_transfer(struct spi_master *master,
231- struct spi_transfer *xfer)
232+ u32 speed_hz)
233 {
234 u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
235 struct mtk_spi *mdata = spi_master_get_devdata(master);
236
237 spi_clk_hz = clk_get_rate(mdata->spi_clk);
238- if (xfer->speed_hz < spi_clk_hz / 2)
239- div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
240+ if (speed_hz < spi_clk_hz / 2)
241+ div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
242 else
243 div = 1;
244
245@@ -323,12 +409,24 @@ static void mtk_spi_setup_packet(struct spi_master *master)
246 u32 packet_size, packet_loop, reg_val;
247 struct mtk_spi *mdata = spi_master_get_devdata(master);
248
249- packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
250+ if (mdata->dev_comp->ipm_design)
251+ packet_size = min_t(u32,
252+ mdata->xfer_len,
253+ MTK_SPI_IPM_PACKET_SIZE);
254+ else
255+ packet_size = min_t(u32,
256+ mdata->xfer_len,
257+ MTK_SPI_PACKET_SIZE);
258+
259 packet_loop = mdata->xfer_len / packet_size;
260
261 reg_val = readl(mdata->base + SPI_CFG1_REG);
262- reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
263+ if (mdata->dev_comp->ipm_design)
264+ reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
265+ else
266+ reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
267 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
268+ reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
269 reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
270 writel(reg_val, mdata->base + SPI_CFG1_REG);
271 }
272@@ -423,7 +521,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
273 mdata->cur_transfer = xfer;
274 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
275 mdata->num_xfered = 0;
276- mtk_spi_prepare_transfer(master, xfer);
277+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
278 mtk_spi_setup_packet(master);
279
developer5d148cb2023-06-02 13:08:11 +0800280 if (xfer->tx_buf) {
281@@ -456,7 +554,7 @@ static int mtk_spi_dma_transfer(struct spi_master *master,
developerfd40db22021-04-29 10:08:25 +0800282 mdata->cur_transfer = xfer;
283 mdata->num_xfered = 0;
284
285- mtk_spi_prepare_transfer(master, xfer);
286+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
287
288 cmd = readl(mdata->base + SPI_CMD_REG);
289 if (xfer->tx_buf)
developer5d148cb2023-06-02 13:08:11 +0800290@@ -533,6 +631,13 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
developerfd40db22021-04-29 10:08:25 +0800291 else
292 mdata->state = MTK_SPI_IDLE;
293
294+ /* SPI-MEM ops */
295+ if (mdata->use_spimem) {
296+ complete(&mdata->spimem_done);
297+
298+ return IRQ_HANDLED;
299+ }
300+
developer6b07dce2022-02-14 14:04:53 +0800301 if (!master->can_dma(master, NULL, trans)) {
developerfd40db22021-04-29 10:08:25 +0800302 if (trans->rx_buf) {
303 cnt = mdata->xfer_len / 4;
developer5d148cb2023-06-02 13:08:11 +0800304@@ -616,12 +721,241 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
developerfd40db22021-04-29 10:08:25 +0800305 return IRQ_HANDLED;
306 }
307
308+static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
309+ const struct spi_mem_op *op)
310+{
311+ if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
312+ op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
313+ return false;
314+
315+ if (op->addr.nbytes && op->dummy.nbytes &&
316+ op->addr.buswidth != op->dummy.buswidth)
317+ return false;
318+
319+ if (op->addr.nbytes + op->dummy.nbytes > 16)
320+ return false;
321+
322+ if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
323+ if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
324+ MTK_SPI_IPM_PACKET_LOOP ||
325+ op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
326+ return false;
327+ }
328+
329+ if (op->data.dir == SPI_MEM_DATA_IN &&
330+ !IS_ALIGNED((size_t)op->data.buf.in, 4))
331+ return false;
332+
333+ return true;
334+}
335+
336+static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
337+ const struct spi_mem_op *op)
338+{
339+ struct mtk_spi *mdata = spi_master_get_devdata(master);
340+
341+ writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
342+ mdata->base + SPI_TX_SRC_REG);
343+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
344+ if (mdata->dev_comp->dma_ext)
345+ writel((u32)(mdata->tx_dma >> 32),
346+ mdata->base + SPI_TX_SRC_REG_64);
347+#endif
348+
349+ if (op->data.dir == SPI_MEM_DATA_IN) {
350+ writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
351+ mdata->base + SPI_RX_DST_REG);
352+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
353+ if (mdata->dev_comp->dma_ext)
354+ writel((u32)(mdata->rx_dma >> 32),
355+ mdata->base + SPI_RX_DST_REG_64);
356+#endif
357+ }
358+}
359+
360+static int mtk_spi_transfer_wait(struct spi_mem *mem,
361+ const struct spi_mem_op *op)
362+{
363+ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
364+ unsigned long long ms = 1;
365+
366+ if (op->data.dir == SPI_MEM_NO_DATA)
367+ ms = 8LL * 1000LL * 32;
368+ else
369+ ms = 8LL * 1000LL * op->data.nbytes;
370+ do_div(ms, mem->spi->max_speed_hz);
371+ ms += ms + 1000; /* 1s tolerance */
372+
373+ if (ms > UINT_MAX)
374+ ms = UINT_MAX;
375+
376+ if (!wait_for_completion_timeout(&mdata->spimem_done,
377+ msecs_to_jiffies(ms))) {
378+ dev_err(mdata->dev, "spi-mem transfer timeout\n");
379+ return -ETIMEDOUT;
380+ }
381+
382+ return 0;
383+}
384+
385+static int mtk_spi_mem_exec_op(struct spi_mem *mem,
386+ const struct spi_mem_op *op)
387+{
388+ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
389+ u32 reg_val, nio = 1, tx_size;
390+ char *tx_tmp_buf;
391+ int ret = 0;
392+
393+ mdata->use_spimem = true;
394+ reinit_completion(&mdata->spimem_done);
395+
396+ mtk_spi_reset(mdata);
397+ mtk_spi_hw_init(mem->spi->master, mem->spi);
398+ mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);
399+
400+ reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
401+ /* opcode byte len */
402+ reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
403+ reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
404+
405+ /* addr & dummy byte len */
406+ reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
407+ if (op->addr.nbytes || op->dummy.nbytes)
408+ reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
409+ SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
410+
411+ /* data byte len */
412+ if (op->data.dir == SPI_MEM_NO_DATA) {
413+ reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
414+ writel(0, mdata->base + SPI_CFG1_REG);
415+ } else {
416+ reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
417+ mdata->xfer_len = op->data.nbytes;
418+ mtk_spi_setup_packet(mem->spi->master);
419+ }
420+
421+ if (op->addr.nbytes || op->dummy.nbytes) {
422+ if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
423+ reg_val |= SPI_CFG3_IPM_XMODE_EN;
424+ else
425+ reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
426+ }
427+
428+ if (op->addr.buswidth == 2 ||
429+ op->dummy.buswidth == 2 ||
430+ op->data.buswidth == 2)
431+ nio = 2;
432+ else if (op->addr.buswidth == 4 ||
433+ op->dummy.buswidth == 4 ||
434+ op->data.buswidth == 4)
435+ nio = 4;
436+
437+ reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
438+ reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
439+
440+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
441+ if (op->data.dir == SPI_MEM_DATA_IN)
442+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
443+ else
444+ reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
445+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
446+
447+ tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
448+ if (op->data.dir == SPI_MEM_DATA_OUT)
449+ tx_size += op->data.nbytes;
450+
451+ tx_size = max(tx_size, (u32)32);
452+
453+ tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
454+ if (!tx_tmp_buf)
455+ return -ENOMEM;
456+
457+ tx_tmp_buf[0] = op->cmd.opcode;
458+
459+ if (op->addr.nbytes) {
460+ int i;
461+
462+ for (i = 0; i < op->addr.nbytes; i++)
463+ tx_tmp_buf[i + 1] = op->addr.val >>
464+ (8 * (op->addr.nbytes - i - 1));
465+ }
466+
467+ if (op->dummy.nbytes)
468+ memset(tx_tmp_buf + op->addr.nbytes + 1,
469+ 0xff,
470+ op->dummy.nbytes);
471+
472+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
473+ memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
474+ op->data.buf.out,
475+ op->data.nbytes);
476+
477+ mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
478+ tx_size, DMA_TO_DEVICE);
479+ if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
480+ ret = -ENOMEM;
481+ goto err_exit;
482+ }
483+
484+ if (op->data.dir == SPI_MEM_DATA_IN) {
485+ mdata->rx_dma = dma_map_single(mdata->dev,
486+ op->data.buf.in,
487+ op->data.nbytes,
488+ DMA_FROM_DEVICE);
489+ if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
490+ ret = -ENOMEM;
491+ goto unmap_tx_dma;
492+ }
493+ }
494+
495+ reg_val = readl(mdata->base + SPI_CMD_REG);
496+ reg_val |= SPI_CMD_TX_DMA;
497+ if (op->data.dir == SPI_MEM_DATA_IN)
498+ reg_val |= SPI_CMD_RX_DMA;
499+ writel(reg_val, mdata->base + SPI_CMD_REG);
500+
501+ mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);
502+
503+ mtk_spi_enable_transfer(mem->spi->master);
504+
505+ /* Wait for the interrupt. */
506+ ret = mtk_spi_transfer_wait(mem, op);
507+ if (ret)
508+ goto unmap_rx_dma;
509+
510+ /* spi disable dma */
511+ reg_val = readl(mdata->base + SPI_CMD_REG);
512+ reg_val &= ~SPI_CMD_TX_DMA;
513+ if (op->data.dir == SPI_MEM_DATA_IN)
514+ reg_val &= ~SPI_CMD_RX_DMA;
515+ writel(reg_val, mdata->base + SPI_CMD_REG);
516+
517+	/* fall through: rx buffer must be unmapped exactly once, below */
518+unmap_rx_dma:
519+	if (op->data.dir == SPI_MEM_DATA_IN)
520+		dma_unmap_single(mdata->dev, mdata->rx_dma,
521+			op->data.nbytes, DMA_FROM_DEVICE);
522+
523+unmap_tx_dma:
524+ dma_unmap_single(mdata->dev, mdata->tx_dma,
525+ tx_size, DMA_TO_DEVICE);
526+err_exit:
527+ kfree(tx_tmp_buf);
528+ mdata->use_spimem = false;
529+
530+ return ret;
531+}
532+
533+static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
534+ .supports_op = mtk_spi_mem_supports_op,
535+ .exec_op = mtk_spi_mem_exec_op,
536+};
537+
538 static int mtk_spi_probe(struct platform_device *pdev)
539 {
540 struct spi_master *master;
541 struct mtk_spi *mdata;
542 const struct of_device_id *of_id;
543- struct resource *res;
544 int i, irq, ret, addr_bits;
545
546 master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
developer5d148cb2023-06-02 13:08:11 +0800547@@ -630,7 +964,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
developerfd40db22021-04-29 10:08:25 +0800548 return -ENOMEM;
549 }
550
551- master->auto_runtime_pm = true;
552+// master->auto_runtime_pm = true;
553 master->dev.of_node = pdev->dev.of_node;
554 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
555
developer5d148cb2023-06-02 13:08:11 +0800556@@ -649,9 +983,25 @@ static int mtk_spi_probe(struct platform_device *pdev)
developerfd40db22021-04-29 10:08:25 +0800557
558 mdata = spi_master_get_devdata(master);
559 mdata->dev_comp = of_id->data;
560+
561+ if (mdata->dev_comp->enhance_timing)
562+ master->mode_bits |= SPI_CS_HIGH;
563+
564 if (mdata->dev_comp->must_tx)
565 master->flags = SPI_MASTER_MUST_TX;
566
567+ if (mdata->dev_comp->ipm_design)
568+ master->mode_bits |= SPI_LOOP;
569+
570+ if (mdata->dev_comp->support_quad) {
571+ master->mem_ops = &mtk_spi_mem_ops;
572+ master->mode_bits |= SPI_RX_DUAL | SPI_TX_DUAL |
573+ SPI_RX_QUAD | SPI_TX_QUAD;
574+
575+ mdata->dev = &pdev->dev;
576+ init_completion(&mdata->spimem_done);
577+ }
578+
579 if (mdata->dev_comp->need_pad_sel) {
580 mdata->pad_num = of_property_count_u32_elems(
581 pdev->dev.of_node,
developer5d148cb2023-06-02 13:08:11 +0800582@@ -684,15 +1034,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
developerfd40db22021-04-29 10:08:25 +0800583 }
584
585 platform_set_drvdata(pdev, master);
586-
587- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
588- if (!res) {
589- ret = -ENODEV;
590- dev_err(&pdev->dev, "failed to determine base address\n");
591- goto err_put_master;
592- }
593-
594- mdata->base = devm_ioremap_resource(&pdev->dev, res);
595+ mdata->base = devm_platform_ioremap_resource(pdev, 0);
596 if (IS_ERR(mdata->base)) {
597 ret = PTR_ERR(mdata->base);
598 goto err_put_master;
developer5d148cb2023-06-02 13:08:11 +0800599@@ -714,6 +1056,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
developerfd40db22021-04-29 10:08:25 +0800600 goto err_put_master;
601 }
602
603+/*
604 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
605 if (IS_ERR(mdata->parent_clk)) {
606 ret = PTR_ERR(mdata->parent_clk);
developer5d148cb2023-06-02 13:08:11 +0800607@@ -751,7 +1094,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
developerfd40db22021-04-29 10:08:25 +0800608 clk_disable_unprepare(mdata->spi_clk);
609
610 pm_runtime_enable(&pdev->dev);
611-
612+*/
613 ret = devm_spi_register_master(&pdev->dev, master);
614 if (ret) {
615 dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
616diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
617index f0e6d6483..fae9bc15c 100644
618--- a/include/linux/platform_data/spi-mt65xx.h
619+++ b/include/linux/platform_data/spi-mt65xx.h
620@@ -11,7 +11,7 @@
621
622 /* Board specific platform_data */
623 struct mtk_chip_config {
624- u32 cs_pol;
625 u32 sample_sel;
626+ u32 get_tick_dly;
627 };
628 #endif
629--
developer5d148cb2023-06-02 13:08:11 +08006302.34.1
developerfd40db22021-04-29 10:08:25 +0800631