From 1ecb38eabd90efe93957d0a822a167560c39308a Mon Sep 17 00:00:00 2001
From: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
Date: Wed, 20 Mar 2019 16:19:51 +0800
Subject: [PATCH 6/6] spi: spi-mem: MediaTek: Add SPI NAND Flash interface
 driver for MediaTek MT7622

Change-Id: I3e78406bb9b46b0049d3988a5c71c7069e4f809c
Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
---
 drivers/spi/Kconfig        |    9 +
 drivers/spi/Makefile       |    1 +
 drivers/spi/spi-mtk-snfi.c | 1218 +++++++++++++++++++++++++++++++++++
 3 files changed, 1228 insertions(+)
 create mode 100644 drivers/spi/spi-mtk-snfi.c

--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mp
 obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
 obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
 obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
+obj-$(CONFIG_SPI_MTK_SNFI) += spi-mtk-snfi.o
 obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
 obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
 obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -427,6 +427,15 @@ config SPI_MT65XX
 	  say Y or M here.If you are not sure, say N.
 	  SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
 
+config SPI_MTK_SNFI
+	tristate "MediaTek SPI NAND interface"
+	select MTD_SPI_NAND
+	help
+	  This selects the SPI NAND Flash interface (SNFI),
+	  which can be found on MediaTek SoCs.
+	  Say Y or M here. If you are not sure, say N.
+	  Note that parallel NAND and SPI NAND are mutually exclusive on MediaTek SoCs.
+
 config SPI_MT7621
 	tristate "MediaTek MT7621 SPI Controller"
 	depends on RALINK || COMPILE_TEST
--- /dev/null
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the MediaTek SPI NAND interface
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ * Authors: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/mtk_ecc.h>
+#include <linux/mtd/spinand.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/* NAND controller register definition */
+/* NFI control */
+#define NFI_CNFG	0x00
+#define CNFG_DMA	BIT(0)
+#define CNFG_READ_EN	BIT(1)
+#define CNFG_DMA_BURST_EN	BIT(2)
+#define CNFG_BYTE_RW	BIT(6)
+#define CNFG_HW_ECC_EN	BIT(8)
+#define CNFG_AUTO_FMT_EN	BIT(9)
+#define CNFG_OP_PROGRAM	(3UL << 12)
+#define CNFG_OP_CUST	(6UL << 12)
+#define NFI_PAGEFMT	0x04
+#define PAGEFMT_512	0
+#define PAGEFMT_2K	1
+#define PAGEFMT_4K	2
+#define PAGEFMT_FDM_SHIFT	8
+#define PAGEFMT_FDM_ECC_SHIFT	12
+#define NFI_CON	0x08
+#define CON_FIFO_FLUSH	BIT(0)
+#define CON_NFI_RST	BIT(1)
+#define CON_BRD	BIT(8)
+#define CON_BWR	BIT(9)
+#define CON_SEC_SHIFT	12
+#define NFI_INTR_EN	0x10
+#define INTR_AHB_DONE_EN	BIT(6)
+#define NFI_INTR_STA	0x14
+#define NFI_CMD	0x20
+#define NFI_STA	0x60
+#define STA_EMP_PAGE	BIT(12)
+#define NAND_FSM_MASK	(0x1f << 24)
+#define NFI_FSM_MASK	(0xf << 16)
+#define NFI_ADDRCNTR	0x70
+#define CNTR_MASK	GENMASK(16, 12)
+#define ADDRCNTR_SEC_SHIFT	12
+#define ADDRCNTR_SEC(val) \
+		(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR	0x80
+#define NFI_BYTELEN	0x84
+#define NFI_CSEL	0x90
+#define NFI_FDML(x)	(0xa0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x)	(0xa4 + (x) * sizeof(u32) * 2)
+#define NFI_MASTER_STA	0x224
+#define MASTER_STA_MASK	0x0fff
+/* NFI_SPI control */
+#define SNFI_MAC_OUTL	0x504
+#define SNFI_MAC_INL	0x508
+#define SNFI_RD_CTL2	0x510
+#define RD_CMD_MASK	0x00ff
+#define RD_DUMMY_SHIFT	8
+#define SNFI_RD_CTL3	0x514
+#define RD_ADDR_MASK	0xffff
+#define SNFI_MISC_CTL	0x538
+#define RD_MODE_X2	BIT(16)
+#define RD_MODE_X4	(2UL << 16)
+#define RD_QDUAL_IO	(4UL << 16)
+#define RD_MODE_MASK	(7UL << 16)
+#define RD_CUSTOM_EN	BIT(6)
+#define WR_CUSTOM_EN	BIT(7)
+#define WR_X4_EN	BIT(20)
+#define SW_RST	BIT(28)
+#define SNFI_MISC_CTL2	0x53c
+#define WR_LEN_SHIFT	16
+#define SNFI_PG_CTL1	0x524
+#define WR_LOAD_CMD_SHIFT	8
+#define SNFI_PG_CTL2	0x528
+#define WR_LOAD_ADDR_MASK	0xffff
+#define SNFI_MAC_CTL	0x500
+#define MAC_WIP	BIT(0)
+#define MAC_WIP_READY	BIT(1)
+#define MAC_TRIG	BIT(2)
+#define MAC_EN	BIT(3)
+#define MAC_SIO_SEL	BIT(4)
+#define SNFI_STA_CTL1	0x550
+#define SPI_STATE_IDLE	0xf
+#define SNFI_CNFG	0x55c
+#define SNFI_MODE_EN	BIT(0)
+#define SNFI_GPRAM_DATA	0x800
+#define SNFI_GPRAM_MAX_LEN	16
+
+/* Dummy commands to trigger the NFI to enter SPI mode */
+#define NAND_CMD_DUMMYREAD	0x00
+#define NAND_CMD_DUMMYPROG	0x80
+
+#define MTK_TIMEOUT	500000
+#define MTK_RESET_TIMEOUT	1000000
+#define MTK_SNFC_MIN_SPARE	16
+#define KB(x)	((x) * 1024UL)
+/*
+ * Supported spare size of each IP.
+ * The order should be the same as the spare size bitfield definition of
+ * the register NFI_PAGEFMT.
+ */
+static const u8 spare_size_mt7622[] = {
+	16, 26, 27, 28
+};
+
+struct mtk_snfi_caps {
+	const u8 *spare_size;
+	u8 num_spare_size;
+	u32 nand_sec_size;
+	u8 nand_fdm_size;
+	u8 nand_fdm_ecc_size;
+	u8 ecc_parity_bits;
+	u8 pageformat_spare_shift;
+	u8 bad_mark_swap;
+};
+
+struct mtk_snfi_bad_mark_ctl {
+	void (*bm_swap)(struct spi_mem *mem, u8 *buf, int raw);
+	u32 sec;
+	u32 pos;
+};
+
+struct mtk_snfi_nand_chip {
+	struct mtk_snfi_bad_mark_ctl bad_mark;
+	u32 spare_per_sector;
+};
+
+struct mtk_snfi_clk {
+	struct clk *nfi_clk;
+	struct clk *spi_clk;
+};
+
+struct mtk_snfi {
+	const struct mtk_snfi_caps *caps;
+	struct mtk_snfi_nand_chip snfi_nand;
+	struct mtk_snfi_clk clk;
+	struct mtk_ecc_config ecc_cfg;
+	struct mtk_ecc *ecc;
+	struct completion done;
+	struct device *dev;
+
+	void __iomem *regs;
+
+	u8 *buffer;
+};
+
+static inline u8 *oob_ptr(struct spi_mem *mem, int i)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u8 *poi;
+
+	/* Map each sector's FDM data to the free OOB buffer: the
+	 * beginning of the OOB area stores the FDM data of the bad-mark sector
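+	 * (e.g. with an 8-byte FDM and bad_mark.sec == 2, sector 2 maps to
+	 * OOB bytes 0-7 while sectors 0 and 1 map to bytes 8-23)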
+	 */
+
+	if (i < snfi_nand->bad_mark.sec)
+		poi = spinand->oobbuf + (i + 1) * snfi->caps->nand_fdm_size;
+	else if (i == snfi_nand->bad_mark.sec)
+		poi = spinand->oobbuf;
+	else
+		poi = spinand->oobbuf + i * snfi->caps->nand_fdm_size;
+
+	return poi;
+}
+
+static inline int mtk_data_len(struct spi_mem *mem)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+
+	return snfi->caps->nand_sec_size + snfi_nand->spare_per_sector;
+}
+
+static inline u8 *mtk_oob_ptr(struct spi_mem *mem,
+			      const u8 *p, int i)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+
+	return (u8 *)p + i * mtk_data_len(mem) + snfi->caps->nand_sec_size;
+}
+
+static void mtk_snfi_bad_mark_swap(struct spi_mem *mem,
+				   u8 *buf, int raw)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 bad_pos = snfi_nand->bad_mark.pos;
+
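+	/* swap the data byte at the bad-mark position with the first FDM
+	 * byte so the factory bad-block marker is preserved in the layout */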
+	if (raw)
+		bad_pos += snfi_nand->bad_mark.sec * mtk_data_len(mem);
+	else
+		bad_pos += snfi_nand->bad_mark.sec * snfi->caps->nand_sec_size;
+
+	swap(spinand->oobbuf[0], buf[bad_pos]);
+}
+
+static void mtk_snfi_set_bad_mark_ctl(struct mtk_snfi_bad_mark_ctl *bm_ctl,
+				      struct spi_mem *mem)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+
+	bm_ctl->bm_swap = mtk_snfi_bad_mark_swap;
+	bm_ctl->sec = mtd->writesize / mtk_data_len(mem);
+	bm_ctl->pos = mtd->writesize % mtk_data_len(mem);
+}
+
+static void mtk_snfi_mac_enable(struct mtk_snfi *snfi)
+{
+	u32 mac;
+
+	mac = readl(snfi->regs + SNFI_MAC_CTL);
+	mac &= ~MAC_SIO_SEL;
+	mac |= MAC_EN;
+
+	writel(mac, snfi->regs + SNFI_MAC_CTL);
+}
+
+static int mtk_snfi_mac_trigger(struct mtk_snfi *snfi)
+{
+	u32 mac, reg;
+	int ret = 0;
+
+	mac = readl(snfi->regs + SNFI_MAC_CTL);
+	mac |= MAC_TRIG;
+	writel(mac, snfi->regs + SNFI_MAC_CTL);
+
+	ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
+					reg & MAC_WIP_READY, 10,
+					MTK_TIMEOUT);
+	if (ret < 0) {
+		dev_err(snfi->dev, "polling wip ready for read timeout\n");
+		return -EIO;
+	}
+
+	ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
+					!(reg & MAC_WIP), 10,
+					MTK_TIMEOUT);
+	if (ret < 0) {
+		dev_err(snfi->dev, "polling flash update timeout\n");
+		return -EIO;
+	}
+
+	return ret;
+}
+
+static void mtk_snfi_mac_leave(struct mtk_snfi *snfi)
+{
+	u32 mac;
+
+	mac = readl(snfi->regs + SNFI_MAC_CTL);
+	mac &= ~(MAC_TRIG | MAC_EN | MAC_SIO_SEL);
+	writel(mac, snfi->regs + SNFI_MAC_CTL);
+}
+
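+/* A MAC operation performs one command/response exchange through the
+ * controller GPRAM: enable the MAC, trigger it, then leave MAC mode. */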
+static int mtk_snfi_mac_op(struct mtk_snfi *snfi)
+{
+	int ret = 0;
+
+	mtk_snfi_mac_enable(snfi);
+
+	ret = mtk_snfi_mac_trigger(snfi);
+	if (ret)
+		return ret;
+
+	mtk_snfi_mac_leave(snfi);
+
+	return ret;
+}
+
+static irqreturn_t mtk_snfi_irq(int irq, void *id)
+{
+	struct mtk_snfi *snfi = id;
+	u16 sta, ien;
+
+	sta = readw(snfi->regs + NFI_INTR_STA);
+	ien = readw(snfi->regs + NFI_INTR_EN);
+
+	if (!(sta & ien))
+		return IRQ_NONE;
+
+	writew(~sta & ien, snfi->regs + NFI_INTR_EN);
+	complete(&snfi->done);
+
+	return IRQ_HANDLED;
+}
+
+static int mtk_snfi_enable_clk(struct device *dev, struct mtk_snfi_clk *clk)
+{
+	int ret;
+
+	ret = clk_prepare_enable(clk->nfi_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable nfi clk\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk->spi_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable spi clk\n");
+		clk_disable_unprepare(clk->nfi_clk);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_snfi_disable_clk(struct mtk_snfi_clk *clk)
+{
+	clk_disable_unprepare(clk->nfi_clk);
+	clk_disable_unprepare(clk->spi_clk);
+}
+
+static int mtk_snfi_reset(struct mtk_snfi *snfi)
+{
+	u32 val;
+	int ret;
+
+	/* SW reset controller */
+	val = readl(snfi->regs + SNFI_MISC_CTL) | SW_RST;
+	writel(val, snfi->regs + SNFI_MISC_CTL);
+
+	ret = readw_poll_timeout(snfi->regs + SNFI_STA_CTL1, val,
+				 !(val & SPI_STATE_IDLE), 50,
+				 MTK_RESET_TIMEOUT);
+	if (ret) {
+		dev_warn(snfi->dev, "spi state active in reset [0x%x] = 0x%x\n",
+			 SNFI_STA_CTL1, val);
+		return ret;
+	}
+
+	val = readl(snfi->regs + SNFI_MISC_CTL);
+	val &= ~SW_RST;
+	writel(val, snfi->regs + SNFI_MISC_CTL);
+
+	/* reset all registers and force the NFI master to terminate */
+	writew(CON_FIFO_FLUSH | CON_NFI_RST, snfi->regs + NFI_CON);
+	ret = readw_poll_timeout(snfi->regs + NFI_STA, val,
+				 !(val & (NFI_FSM_MASK | NAND_FSM_MASK)), 50,
+				 MTK_RESET_TIMEOUT);
+	if (ret) {
+		dev_warn(snfi->dev, "nfi active in reset [0x%x] = 0x%x\n",
+			 NFI_STA, val);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int mtk_snfi_set_spare_per_sector(struct spinand_device *spinand,
+					 const struct mtk_snfi_caps *caps,
+					 u32 *sps)
+{
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	const u8 *spare = caps->spare_size;
+	u32 sectors, i, closest_spare = 0;
+
+	sectors = mtd->writesize / caps->nand_sec_size;
+	*sps = mtd->oobsize / sectors;
+
+	if (*sps < MTK_SNFC_MIN_SPARE)
+		return -EINVAL;
+
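+	/* pick the largest supported spare size that fits the OOB per sector */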
+	for (i = 0; i < caps->num_spare_size; i++) {
+		if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
+			closest_spare = i;
+			if (*sps == spare[i])
+				break;
+		}
+	}
+
+	*sps = spare[closest_spare];
+
+	return 0;
+}
+
+static void mtk_snfi_read_fdm_data(struct spi_mem *mem,
+				   u32 sectors)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	const struct mtk_snfi_caps *caps = snfi->caps;
+	u32 vall, valm;
+	int i, j;
+	u8 *oobptr;
+
+	for (i = 0; i < sectors; i++) {
+		oobptr = oob_ptr(mem, i);
+		vall = readl(snfi->regs + NFI_FDML(i));
+		valm = readl(snfi->regs + NFI_FDMM(i));
+
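+		/* FDML(i) holds FDM bytes 0-3, FDMM(i) holds bytes 4-7 */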
+		for (j = 0; j < caps->nand_fdm_size; j++)
+			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+	}
+}
+
+static void mtk_snfi_write_fdm_data(struct spi_mem *mem,
+				    u32 sectors)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	const struct mtk_snfi_caps *caps = snfi->caps;
+	u32 vall, valm;
+	int i, j;
+	u8 *oobptr;
+
+	for (i = 0; i < sectors; i++) {
+		oobptr = oob_ptr(mem, i);
+		vall = 0;
+		valm = 0;
+		for (j = 0; j < 8; j++) {
+			if (j < 4)
+				vall |= (j < caps->nand_fdm_size ? oobptr[j] :
+					 0xff) << (j * 8);
+			else
+				valm |= (j < caps->nand_fdm_size ? oobptr[j] :
+					 0xff) << ((j - 4) * 8);
+		}
+		writel(vall, snfi->regs + NFI_FDML(i));
+		writel(valm, snfi->regs + NFI_FDMM(i));
+	}
+}
+
+static int mtk_snfi_update_ecc_stats(struct spi_mem *mem,
+				     u8 *buf, u32 sectors)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtk_ecc_stats stats;
+	int rc, i;
+
+	rc = readl(snfi->regs + NFI_STA) & STA_EMP_PAGE;
+	if (rc) {
+		memset(buf, 0xff, sectors * snfi->caps->nand_sec_size);
+		for (i = 0; i < sectors; i++)
+			memset(oob_ptr(mem, i), 0xff,
+			       snfi->caps->nand_fdm_size);
+		return 0;
+	}
+
+	mtk_ecc_get_stats(snfi->ecc, &stats, sectors);
+	mtd->ecc_stats.corrected += stats.corrected;
+	mtd->ecc_stats.failed += stats.failed;
+
+	return 0;
+}
+
+static int mtk_snfi_hw_runtime_config(struct spi_mem *mem)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	const struct mtk_snfi_caps *caps = snfi->caps;
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 fmt, spare, i = 0;
+	int ret;
+
+	ret = mtk_snfi_set_spare_per_sector(spinand, caps, &spare);
+	if (ret)
+		return ret;
+
+	/* calculate usable oob bytes for ecc parity data */
+	snfi_nand->spare_per_sector = spare;
+	spare -= caps->nand_fdm_size;
+
+	nand->memorg.oobsize = snfi_nand->spare_per_sector
+		* (mtd->writesize / caps->nand_sec_size);
+	mtd->oobsize = nanddev_per_page_oobsize(nand);
+
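+	/* the maximum ECC strength is the spare area in bits divided by
+	 * the parity bits needed per correction step (13 on this IP) */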
+	snfi->ecc_cfg.strength = (spare << 3) / caps->ecc_parity_bits;
+	mtk_ecc_adjust_strength(snfi->ecc, &snfi->ecc_cfg.strength);
+
+	switch (mtd->writesize) {
+	case 512:
+		fmt = PAGEFMT_512;
+		break;
+	case KB(2):
+		fmt = PAGEFMT_2K;
+		break;
+	case KB(4):
+		fmt = PAGEFMT_4K;
+		break;
+	default:
+		dev_err(snfi->dev, "invalid page len: %d\n", mtd->writesize);
+		return -EINVAL;
+	}
+
+	/* Setup PageFormat */
+	while (caps->spare_size[i] != snfi_nand->spare_per_sector) {
+		i++;
+		if (i == caps->num_spare_size) {
+			dev_err(snfi->dev, "invalid spare size %d\n",
+				snfi_nand->spare_per_sector);
+			return -EINVAL;
+		}
+	}
+
+	fmt |= i << caps->pageformat_spare_shift;
+	fmt |= caps->nand_fdm_size << PAGEFMT_FDM_SHIFT;
+	fmt |= caps->nand_fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+	writel(fmt, snfi->regs + NFI_PAGEFMT);
+
+	snfi->ecc_cfg.len = caps->nand_sec_size + caps->nand_fdm_ecc_size;
+
+	mtk_snfi_set_bad_mark_ctl(&snfi_nand->bad_mark, mem);
+
+	return 0;
+}
+
+static int mtk_snfi_read_from_cache(struct spi_mem *mem,
+				    const struct spi_mem_op *op, int oob_on)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 reg, len, col_addr = 0;
+	int dummy_cycle, ret;
+	dma_addr_t dma_addr;
+
+	len = sectors * (snfi->caps->nand_sec_size
+	      + snfi_nand->spare_per_sector);
+
+	dma_addr = dma_map_single(snfi->dev, snfi->buffer,
+				  len, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(snfi->dev, dma_addr);
+	if (ret) {
+		dev_err(snfi->dev, "dma mapping error\n");
+		return -EINVAL;
+	}
+
+	/* set Read cache command and dummy cycle */
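+	/* dummy cycles = dummy bits / bus width, e.g. one dummy byte on a
+	 * x4 bus takes 8 / 4 = 2 cycles */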
+	dummy_cycle = (op->dummy.nbytes << 3) >> (ffs(op->dummy.buswidth) - 1);
+	reg = ((op->cmd.opcode & RD_CMD_MASK) |
+	       (dummy_cycle << RD_DUMMY_SHIFT));
+	writel(reg, snfi->regs + SNFI_RD_CTL2);
+
+	writel((col_addr & RD_ADDR_MASK), snfi->regs + SNFI_RD_CTL3);
+
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg |= RD_CUSTOM_EN;
+	reg &= ~(RD_MODE_MASK | WR_X4_EN);
+
+	/* set data and addr buswidth */
+	if (op->data.buswidth == 4)
+		reg |= RD_MODE_X4;
+	else if (op->data.buswidth == 2)
+		reg |= RD_MODE_X2;
+
+	if (op->addr.buswidth == 4 || op->addr.buswidth == 2)
+		reg |= RD_QDUAL_IO;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	writel(len, snfi->regs + SNFI_MISC_CTL2);
+	writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
+	reg = readw(snfi->regs + NFI_CNFG);
+	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA | CNFG_OP_CUST;
+
+	if (!oob_on) {
+		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+		writew(reg, snfi->regs + NFI_CNFG);
+
+		snfi->ecc_cfg.mode = ECC_NFI_MODE;
+		snfi->ecc_cfg.sectors = sectors;
+		snfi->ecc_cfg.op = ECC_DECODE;
+		ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
+		if (ret) {
+			dev_err(snfi->dev, "ecc enable failed\n");
+			/* clear NFI_CNFG */
+			reg &= ~(CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA |
+				 CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+			writew(reg, snfi->regs + NFI_CNFG);
+			goto out;
+		}
+	} else {
+		writew(reg, snfi->regs + NFI_CNFG);
+	}
+
+	writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
+	readw(snfi->regs + NFI_INTR_STA);
+	writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
+
+	init_completion(&snfi->done);
+
+	/* set a dummy command to trigger the NFI to enter SPI mode */
+	writew(NAND_CMD_DUMMYREAD, snfi->regs + NFI_CMD);
+	reg = readl(snfi->regs + NFI_CON) | CON_BRD;
+	writew(reg, snfi->regs + NFI_CON);
+
+	ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
+	if (!ret) {
+		dev_err(snfi->dev, "read ahb done timeout\n");
+		writew(0, snfi->regs + NFI_INTR_EN);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	ret = readl_poll_timeout_atomic(snfi->regs + NFI_BYTELEN, reg,
+					ADDRCNTR_SEC(reg) >= sectors, 10,
+					MTK_TIMEOUT);
+	if (ret < 0) {
+		dev_err(snfi->dev, "polling read byte len timeout\n");
+		ret = -EIO;
+	} else {
+		if (!oob_on) {
+			ret = mtk_ecc_wait_done(snfi->ecc, ECC_DECODE);
+			if (ret) {
+				dev_warn(snfi->dev, "wait ecc done timeout\n");
+			} else {
+				mtk_snfi_update_ecc_stats(mem, snfi->buffer,
+							  sectors);
+				mtk_snfi_read_fdm_data(mem, sectors);
+			}
+		}
+	}
+
+	if (oob_on)
+		goto out;
+
+	mtk_ecc_disable(snfi->ecc);
+out:
+	dma_unmap_single(snfi->dev, dma_addr, len, DMA_FROM_DEVICE);
+	writel(0, snfi->regs + NFI_CON);
+	writel(0, snfi->regs + NFI_CNFG);
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg &= ~RD_CUSTOM_EN;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	return ret;
+}
+
+static int mtk_snfi_write_to_cache(struct spi_mem *mem,
+				   const struct spi_mem_op *op,
+				   int oob_on)
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 reg, len, col_addr = 0;
+	dma_addr_t dma_addr;
+	int ret;
+
+	len = sectors * (snfi->caps->nand_sec_size
+	      + snfi_nand->spare_per_sector);
+
+	dma_addr = dma_map_single(snfi->dev, snfi->buffer, len,
+				  DMA_TO_DEVICE);
+	ret = dma_mapping_error(snfi->dev, dma_addr);
+	if (ret) {
+		dev_err(snfi->dev, "dma mapping error\n");
+		return -EINVAL;
+	}
+
+	/* set program load cmd and address */
+	reg = (op->cmd.opcode << WR_LOAD_CMD_SHIFT);
+	writel(reg, snfi->regs + SNFI_PG_CTL1);
+	writel(col_addr & WR_LOAD_ADDR_MASK, snfi->regs + SNFI_PG_CTL2);
+
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg |= WR_CUSTOM_EN;
+	reg &= ~(RD_MODE_MASK | WR_X4_EN);
+
+	if (op->data.buswidth == 4)
+		reg |= WR_X4_EN;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	writel(len << WR_LEN_SHIFT, snfi->regs + SNFI_MISC_CTL2);
+	writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
+
+	reg = readw(snfi->regs + NFI_CNFG);
+	reg &= ~(CNFG_READ_EN | CNFG_BYTE_RW);
+	reg |= CNFG_DMA | CNFG_DMA_BURST_EN | CNFG_OP_PROGRAM;
+
+	if (!oob_on) {
+		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+		writew(reg, snfi->regs + NFI_CNFG);
+
+		snfi->ecc_cfg.mode = ECC_NFI_MODE;
+		snfi->ecc_cfg.op = ECC_ENCODE;
+		ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
+		if (ret) {
+			dev_err(snfi->dev, "ecc enable failed\n");
+			/* clear NFI_CNFG */
+			reg &= ~(CNFG_DMA_BURST_EN | CNFG_DMA |
+				 CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+			writew(reg, snfi->regs + NFI_CNFG);
+			/* the buffer is unmapped once at the common
+			 * exit path below, so do not unmap it here */
+			goto out;
+		}
+		/* write OOB into the FDM registers (OOB area in MTK NAND) */
+		mtk_snfi_write_fdm_data(mem, sectors);
+	} else {
+		writew(reg, snfi->regs + NFI_CNFG);
+	}
+	writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
+	readw(snfi->regs + NFI_INTR_STA);
+	writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
+
+	init_completion(&snfi->done);
+
+	/* set a dummy command to trigger the NFI to enter SPI mode */
+	writew(NAND_CMD_DUMMYPROG, snfi->regs + NFI_CMD);
+	reg = readl(snfi->regs + NFI_CON) | CON_BWR;
+	writew(reg, snfi->regs + NFI_CON);
+
+	ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
+	if (!ret) {
+		dev_err(snfi->dev, "custom program done timeout\n");
+		writew(0, snfi->regs + NFI_INTR_EN);
+		ret = -ETIMEDOUT;
+		goto ecc_disable;
+	}
+
+	ret = readl_poll_timeout_atomic(snfi->regs + NFI_ADDRCNTR, reg,
+					ADDRCNTR_SEC(reg) >= sectors,
+					10, MTK_TIMEOUT);
+	if (ret)
+		dev_err(snfi->dev, "hwecc write timeout\n");
+
+ecc_disable:
+	mtk_ecc_disable(snfi->ecc);
+
+out:
+	dma_unmap_single(snfi->dev, dma_addr, len, DMA_TO_DEVICE);
+	writel(0, snfi->regs + NFI_CON);
+	writel(0, snfi->regs + NFI_CNFG);
+	reg = readl(snfi->regs + SNFI_MISC_CTL);
+	reg &= ~WR_CUSTOM_EN;
+	writel(reg, snfi->regs + SNFI_MISC_CTL);
+
+	return ret;
+}
+
+static int mtk_snfi_read(struct spi_mem *mem,
+			 const struct spi_mem_op *op)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 col_addr = op->addr.val;
+	int i, ret, sectors, oob_on = false;
+
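+	/* a column address equal to writesize means an OOB-only access */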
+	if (col_addr == mtd->writesize)
+		oob_on = true;
+
+	ret = mtk_snfi_read_from_cache(mem, op, oob_on);
+	if (ret) {
+		dev_warn(snfi->dev, "read from cache fail\n");
+		return ret;
+	}
+
+	sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	for (i = 0; i < sectors; i++) {
+		if (oob_on)
+			memcpy(oob_ptr(mem, i),
+			       mtk_oob_ptr(mem, snfi->buffer, i),
+			       snfi->caps->nand_fdm_size);
+
+		if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
+			snfi_nand->bad_mark.bm_swap(mem, snfi->buffer,
+						    oob_on);
+	}
+
+	if (!oob_on)
+		memcpy(spinand->databuf, snfi->buffer, mtd->writesize);
+
+	return ret;
+}
+
+static int mtk_snfi_write(struct spi_mem *mem,
+			  const struct spi_mem_op *op)
+{
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
+	u32 i, sectors, col_addr = op->addr.val;
+	int ret, oob_on = false;
+
+	if (col_addr == mtd->writesize)
+		oob_on = true;
+
+	sectors = mtd->writesize / snfi->caps->nand_sec_size;
+	memset(snfi->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+	if (!oob_on)
+		memcpy(snfi->buffer, spinand->databuf, mtd->writesize);
+
+	for (i = 0; i < sectors; i++) {
+		if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
+			snfi_nand->bad_mark.bm_swap(mem, snfi->buffer, oob_on);
+
+		if (oob_on)
+			memcpy(mtk_oob_ptr(mem, snfi->buffer, i),
+			       oob_ptr(mem, i),
+			       snfi->caps->nand_fdm_size);
+	}
+
+	ret = mtk_snfi_write_to_cache(mem, op, oob_on);
+	if (ret)
+		dev_warn(snfi->dev, "write to cache fail\n");
+
+	return ret;
+}
+
+static int mtk_snfi_command_exec(struct mtk_snfi *snfi,
+				 const u8 *txbuf, u8 *rxbuf,
+				 const u32 txlen, const u32 rxlen)
+{
+	u32 tmp, i, j, reg, m;
+	u8 *p_tmp = (u8 *)(&tmp);
+	int ret = 0;
+
+	/* Moving tx data to NFI_SPI GPRAM */
+	for (i = 0, m = 0; i < txlen; ) {
+		for (j = 0, tmp = 0; i < txlen && j < 4; i++, j++)
+			p_tmp[j] = txbuf[i];
+
+		writel(tmp, snfi->regs + SNFI_GPRAM_DATA + m);
+		m += 4;
+	}
+
+	writel(txlen, snfi->regs + SNFI_MAC_OUTL);
+	writel(rxlen, snfi->regs + SNFI_MAC_INL);
+	ret = mtk_snfi_mac_op(snfi);
+	if (ret)
+		return ret;
+
+	/* For NULL input data, this loop will be skipped */
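+	/* rx data shares GPRAM with the tx bytes, so in the first word the
+	 * read starts at byte offset txlen (this assumes txlen < 4) */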
+	if (rxlen)
+		for (i = 0, m = 0; i < rxlen; ) {
+			reg = readl(snfi->regs +
+				    SNFI_GPRAM_DATA + m);
+			for (j = 0; i < rxlen && j < 4; i++, j++, rxbuf++) {
+				if (m == 0 && i == 0)
+					j = i + txlen;
+				*rxbuf = (reg >> (j * 8)) & 0xFF;
+			}
+			m += 4;
+		}
+
+	return ret;
+}
+
+/*
+ * mtk_snfi_exec_op - process the command/data to be sent to the
+ * SPI NAND device by the MTK controller
+ */
+static int mtk_snfi_exec_op(struct spi_mem *mem,
+			    const struct spi_mem_op *op)
+
+{
+	struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
+	struct spinand_device *spinand = spi_mem_get_drvdata(mem);
+	struct mtd_info *mtd = spinand_to_mtd(spinand);
+	struct nand_device *nand = mtd_to_nanddev(mtd);
+	const struct spi_mem_op *read_cache;
+	const struct spi_mem_op *write_cache;
+	const struct spi_mem_op *update_cache;
+	u32 tmpbufsize, txlen = 0, rxlen = 0;
+	u8 *txbuf, *rxbuf = NULL, *buf;
+	int i, ret = 0;
+
+	ret = mtk_snfi_reset(snfi);
+	if (ret) {
+		dev_warn(snfi->dev, "failed to reset the spi memory controller\n");
+		return ret;
+	}
+
+	/* once the BBT is initialized, the framework has detected the NAND */
+	if (nand->bbt.cache) {
+		read_cache = spinand->op_templates.read_cache;
+		write_cache = spinand->op_templates.write_cache;
+		update_cache = spinand->op_templates.update_cache;
+
+		ret = mtk_snfi_hw_runtime_config(mem);
+		if (ret)
+			return ret;
+
+		/* read/write with cache use the custom flow, erase uses the framework flow */
+		if (op->cmd.opcode == read_cache->cmd.opcode) {
+			ret = mtk_snfi_read(mem, op);
+			if (ret)
+				dev_warn(snfi->dev, "snfi read fail\n");
+
+			return ret;
+		} else if ((op->cmd.opcode == write_cache->cmd.opcode)
+			   || (op->cmd.opcode == update_cache->cmd.opcode)) {
+			ret = mtk_snfi_write(mem, op);
+			if (ret)
+				dev_warn(snfi->dev, "snfi write fail\n");
+
+			return ret;
+		}
+	}
+
+	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
+		     op->dummy.nbytes + op->data.nbytes;
+
+	txbuf = kzalloc(tmpbufsize, GFP_KERNEL);
+	if (!txbuf)
+		return -ENOMEM;
+
+	txbuf[txlen++] = op->cmd.opcode;
+
+	if (op->addr.nbytes)
+		for (i = 0; i < op->addr.nbytes; i++)
+			txbuf[txlen++] = op->addr.val >>
+					 (8 * (op->addr.nbytes - i - 1));
+
+	txlen += op->dummy.nbytes;
+
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		for (i = 0; i < op->data.nbytes; i++) {
+			buf = (u8 *)op->data.buf.out;
+			txbuf[txlen++] = buf[i];
+		}
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		rxbuf = (u8 *)op->data.buf.in;
+		rxlen += op->data.nbytes;
+	}
+
+	ret = mtk_snfi_command_exec(snfi, txbuf, rxbuf, txlen, rxlen);
+	kfree(txbuf);
+
+	return ret;
+}
+
+static int mtk_snfi_init(struct mtk_snfi *snfi)
+{
+	int ret;
+
+	/* Reset the state machine and data FIFO */
+	ret = mtk_snfi_reset(snfi);
+	if (ret) {
+		dev_warn(snfi->dev, "failed to reset the snfi controller\n");
+		return ret;
+	}
+
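+	/* bounce buffer sized for the largest supported page (4KB) + spare */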
+	snfi->buffer = devm_kzalloc(snfi->dev, 4096 + 256, GFP_KERNEL);
+	if (!snfi->buffer)
+		return -ENOMEM;
+
+	/* Clear the interrupt status; the register is read-to-clear. */
+	readw(snfi->regs + NFI_INTR_STA);
+	writew(0, snfi->regs + NFI_INTR_EN);
+
+	writel(0, snfi->regs + NFI_CON);
+	writel(0, snfi->regs + NFI_CNFG);
+
+	/* Change to NFI_SPI mode. */
+	writel(SNFI_MODE_EN, snfi->regs + SNFI_CNFG);
+
+	return 0;
+}
+
+static int mtk_snfi_check_buswidth(u8 width)
+{
+	switch (width) {
+	case 1:
+	case 2:
+	case 4:
+		return 0;
+
+	default:
+		break;
+	}
+
+	return -ENOTSUPP;
+}
+
+static bool mtk_snfi_supports_op(struct spi_mem *mem,
+				 const struct spi_mem_op *op)
+{
+	int ret = 0;
+
+	/* the MTK SPI NAND controller only supports 1-bit command buswidth */
+	if (op->cmd.buswidth != 1)
+		ret = -ENOTSUPP;
+
+	if (op->addr.nbytes)
+		ret |= mtk_snfi_check_buswidth(op->addr.buswidth);
+
+	if (op->dummy.nbytes)
+		ret |= mtk_snfi_check_buswidth(op->dummy.buswidth);
+
+	if (op->data.nbytes)
+		ret |= mtk_snfi_check_buswidth(op->data.buswidth);
+
+	if (ret)
+		return false;
+
+	return true;
+}
+
+static const struct spi_controller_mem_ops mtk_snfi_ops = {
+	.supports_op = mtk_snfi_supports_op,
+	.exec_op = mtk_snfi_exec_op,
+};
+
+static const struct mtk_snfi_caps snfi_mt7622 = {
+	.spare_size = spare_size_mt7622,
+	.num_spare_size = 4,
+	.nand_sec_size = 512,
+	.nand_fdm_size = 8,
+	.nand_fdm_ecc_size = 1,
+	.ecc_parity_bits = 13,
+	.pageformat_spare_shift = 4,
+	.bad_mark_swap = 0,
+};
+
+static const struct mtk_snfi_caps snfi_mt7629 = {
+	.spare_size = spare_size_mt7622,
+	.num_spare_size = 4,
+	.nand_sec_size = 512,
+	.nand_fdm_size = 8,
+	.nand_fdm_ecc_size = 1,
+	.ecc_parity_bits = 13,
+	.pageformat_spare_shift = 4,
+	.bad_mark_swap = 1,
+};
+
+static const struct of_device_id mtk_snfi_id_table[] = {
+	{ .compatible = "mediatek,mt7622-snfi", .data = &snfi_mt7622, },
+	{ .compatible = "mediatek,mt7629-snfi", .data = &snfi_mt7629, },
+	{ /* sentinel */ }
+};
+
+static int mtk_snfi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct spi_controller *ctlr;
+	struct mtk_snfi *snfi;
+	struct resource *res;
+	int ret = 0, irq;
+
+	ctlr = spi_alloc_master(&pdev->dev, sizeof(*snfi));
+	if (!ctlr)
+		return -ENOMEM;
+
+	snfi = spi_controller_get_devdata(ctlr);
+	snfi->caps = of_device_get_match_data(dev);
+	snfi->dev = dev;
+
+	snfi->ecc = of_mtk_ecc_get(np);
+	if (IS_ERR_OR_NULL(snfi->ecc)) {
+		ret = snfi->ecc ? PTR_ERR(snfi->ecc) : -ENODEV;
+		goto err_put_master;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	snfi->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(snfi->regs)) {
+		ret = PTR_ERR(snfi->regs);
+		goto release_ecc;
+	}
+
+	/* find the clocks */
+	snfi->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+	if (IS_ERR(snfi->clk.nfi_clk)) {
+		dev_err(dev, "no nfi clk\n");
+		ret = PTR_ERR(snfi->clk.nfi_clk);
+		goto release_ecc;
+	}
+
+	snfi->clk.spi_clk = devm_clk_get(dev, "spi_clk");
+	if (IS_ERR(snfi->clk.spi_clk)) {
+		dev_err(dev, "no spi clk\n");
+		ret = PTR_ERR(snfi->clk.spi_clk);
+		goto release_ecc;
+	}
+
+	ret = mtk_snfi_enable_clk(dev, &snfi->clk);
+	if (ret)
+		goto release_ecc;
+
+	/* find the irq */
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no snfi irq resource\n");
+		ret = -EINVAL;
+		goto clk_disable;
+	}
+
+	ret = devm_request_irq(dev, irq, mtk_snfi_irq, 0, "mtk-snfi", snfi);
+	if (ret) {
+		dev_err(dev, "failed to request snfi irq\n");
+		goto clk_disable;
+	}
+
+	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dev, "failed to set dma mask\n");
+		goto clk_disable;
+	}
+
+	ctlr->dev.of_node = np;
+	ctlr->mem_ops = &mtk_snfi_ops;
+
+	platform_set_drvdata(pdev, snfi);
+	ret = mtk_snfi_init(snfi);
+	if (ret) {
+		dev_err(dev, "failed to init snfi\n");
+		goto clk_disable;
+	}
+
+	ret = devm_spi_register_master(dev, ctlr);
+	if (ret)
+		goto clk_disable;
+
+	return 0;
+
+clk_disable:
+	mtk_snfi_disable_clk(&snfi->clk);
+
+release_ecc:
+	mtk_ecc_release(snfi->ecc);
+
+err_put_master:
+	spi_master_put(ctlr);
+
+	dev_err(dev, "MediaTek SPI NAND interface probe failed %d\n", ret);
+	return ret;
+}
+
+static int mtk_snfi_remove(struct platform_device *pdev)
+{
+	struct mtk_snfi *snfi = platform_get_drvdata(pdev);
+
+	mtk_snfi_disable_clk(&snfi->clk);
+
+	return 0;
+}
+
+static int mtk_snfi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mtk_snfi *snfi = platform_get_drvdata(pdev);
+
+	mtk_snfi_disable_clk(&snfi->clk);
+
+	return 0;
+}
+
+static int mtk_snfi_resume(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_snfi *snfi = dev_get_drvdata(dev);
+	int ret;
+
+	ret = mtk_snfi_enable_clk(dev, &snfi->clk);
+	if (ret)
+		return ret;
+
+	ret = mtk_snfi_init(snfi);
+	if (ret)
+		dev_err(dev, "failed to init snfi controller\n");
+
+	return ret;
+}
+
+static struct platform_driver mtk_snfi_driver = {
+	.driver = {
+		.name = "mtk-snfi",
+		.of_match_table = mtk_snfi_id_table,
+	},
+	.probe = mtk_snfi_probe,
+	.remove = mtk_snfi_remove,
+	.suspend = mtk_snfi_suspend,
+	.resume = mtk_snfi_resume,
+};
+
+module_platform_driver(mtk_snfi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SPI Memory Interface Driver");