// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"

/* NFI registers */
#define NFI_CNFG	0x000
#define CNFG_OP_MODE_S	12
#define CNFG_OP_MODE_CUST	6
#define CNFG_OP_MODE_PROGRAM	3
#define CNFG_AUTO_FMT_EN	BIT(9)
#define CNFG_HW_ECC_EN	BIT(8)
#define CNFG_DMA_BURST_EN	BIT(2)
#define CNFG_READ_MODE	BIT(1)
#define CNFG_DMA_MODE	BIT(0)

#define NFI_PAGEFMT	0x0004
#define NFI_SPARE_SIZE_LS_S	16
#define NFI_FDM_ECC_NUM_S	12
#define NFI_FDM_NUM_S	8
#define NFI_SPARE_SIZE_S	4
#define NFI_SEC_SEL_512	BIT(2)
#define NFI_PAGE_SIZE_S	0
#define NFI_PAGE_SIZE_512_2K	0
#define NFI_PAGE_SIZE_2K_4K	1
#define NFI_PAGE_SIZE_4K_8K	2
#define NFI_PAGE_SIZE_8K_16K	3

#define NFI_CON	0x008
#define CON_SEC_NUM_S	12
#define CON_BWR	BIT(9)
#define CON_BRD	BIT(8)
#define CON_NFI_RST	BIT(1)
#define CON_FIFO_FLUSH	BIT(0)

#define NFI_INTR_EN	0x010
#define NFI_INTR_STA	0x014
#define NFI_IRQ_INTR_EN	BIT(31)
#define NFI_IRQ_CUS_READ	BIT(8)
#define NFI_IRQ_CUS_PG	BIT(7)

#define NFI_CMD	0x020

#define NFI_STRDATA	0x040
#define STR_DATA	BIT(0)

#define NFI_STA	0x060
#define NFI_NAND_FSM	GENMASK(28, 24)
#define NFI_FSM	GENMASK(19, 16)
#define READ_EMPTY	BIT(12)

#define NFI_FIFOSTA	0x064
#define FIFO_WR_REMAIN_S	8
#define FIFO_RD_REMAIN_S	0

#define NFI_ADDRCNTR	0x070
#define SEC_CNTR	GENMASK(16, 12)
#define SEC_CNTR_S	12
#define NFI_SEC_CNTR(val)	(((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR	0x080

#define NFI_BYTELEN	0x084
#define BUS_SEC_CNTR(val)	(((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L	0x0a0
#define NFI_FDM0M	0x0a4
#define NFI_FDML(n)	(NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n)	(NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1	0x220
#define WBUF_EN	BIT(2)

#define NFI_MASTERSTA	0x224
#define MAS_ADDR	GENMASK(11, 9)
#define MAS_RD	GENMASK(8, 6)
#define MAS_WR	GENMASK(5, 3)
#define MAS_RDDLY	GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622	(MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define AHB_BUS_BUSY	BIT(1)
#define BUS_BUSY	BIT(0)
#define NFI_MASTERSTA_MASK_7986	(AHB_BUS_BUSY | BUS_BUSY)

/* SNFI registers */
#define SNF_MAC_CTL	0x500
#define MAC_XIO_SEL	BIT(4)
#define SF_MAC_EN	BIT(3)
#define SF_TRIG	BIT(2)
#define WIP_READY	BIT(1)
#define WIP	BIT(0)

#define SNF_MAC_OUTL	0x504
#define SNF_MAC_INL	0x508

#define SNF_RD_CTL2	0x510
#define DATA_READ_DUMMY_S	8
#define DATA_READ_CMD_S	0

#define SNF_RD_CTL3	0x514

#define SNF_PG_CTL1	0x524
#define PG_LOAD_CMD_S	8

#define SNF_PG_CTL2	0x528

#define SNF_MISC_CTL	0x538
#define SW_RST	BIT(28)
#define FIFO_RD_LTC_S	25
#define PG_LOAD_X4_EN	BIT(20)
#define DATA_READ_MODE_S	16
#define DATA_READ_MODE	GENMASK(18, 16)
#define DATA_READ_MODE_X1	0
#define DATA_READ_MODE_X2	1
#define DATA_READ_MODE_X4	2
#define DATA_READ_MODE_DUAL	5
#define DATA_READ_MODE_QUAD	6
#define LATCH_LAT_S	8
#define LATCH_LAT	GENMASK(9, 8)
#define PG_LOAD_CUSTOM_EN	BIT(7)
#define DATARD_CUSTOM_EN	BIT(6)
#define CS_DESELECT_CYC_S	0

#define SNF_MISC_CTL2	0x53c
#define PROGRAM_LOAD_BYTE_NUM_S	16
#define READ_DATA_BYTE_NUM_S	11

#define SNF_DLY_CTL3	0x548
#define SFCK_SAM_DLY_S	0

#define SNF_STA_CTL1	0x550
#define CUS_PG_DONE	BIT(28)
#define CUS_READ_DONE	BIT(27)
#define SPI_STATE_S	0
#define SPI_STATE	GENMASK(3, 0)

#define SNF_CFG	0x55c
#define SPI_MODE	BIT(0)

#define SNF_GPRAM	0x800
#define SNF_GPRAM_SIZE	0xa0

#define SNFI_POLL_INTERVAL	1000000

static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const uint8_t mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
	[SNAND_SOC_MT7622] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = false,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7629] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = true,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7986] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
};

static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
			     uint32_t set)
{
	uint32_t val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

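/*
 * The GPRAM only supports aligned 32-bit accesses, so the two helpers below
 * pack/unpack byte streams into little-endian words. Note that
 * nfi_write_data() zero-fills the untouched bytes of a partial word rather
 * than doing a read-modify-write; e.g. writing { 0xaa, 0xbb } at reg 0x802
 * emits a single 32-bit write of 0xbbaa0000 to 0x800, which is fine here
 * because SPI command buffers are always built from scratch.
 */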
static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		val |= ((uint32_t)*data++) << (8 * (i % es));

		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}

static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}

static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
	uint8_t tmp = *bm1;
	*bm1 = *bm2;
	*bm2 = tmp;
}

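/*
 * On SoCs with bbm_swap set, the NFI sector layout ([data][FDM][ECC] per
 * sector) puts ordinary user data at the page offset where the standard bad
 * block marker physically lives (the first spare byte, i.e. page offset
 * writesize). The four helpers below swap that byte with the first FDM byte
 * of the last sector, in both raw and formatted views, so the marker stays
 * at the standard position on flash while the driver still presents a clean
 * data/OOB split.
 */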
static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
		      snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[snf->writesize]);
}

static void mtk_snand_bm_swap(struct mtk_snand *snf)
{
	uint32_t buf_bbm_pos, fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	buf_bbm_pos = snf->writesize -
		      (snf->ecc_steps - 1) * snf->spare_per_sector;
	fdm_bbm_pos = snf->writesize +
		      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->nfi_soc->sector_size;
	fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
		       snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->writesize;
	fdm_bbm_pos2 = snf->writesize +
		       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
	uint32_t val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "NFI master is still busy after reset\n");
		return ret;
	}

	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	uint32_t val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
		    (10 << CS_DESELECT_CYC_S) |
		    (snf->nfi_soc->latch_lat << LATCH_LAT_S));

	return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
				 uint32_t inlen)
{
	int ret;
	uint32_t val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev,
			       "Timed out waiting for WIP cleared\n");
	}

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
		     uint8_t *in, uint32_t inlen)
{
	int ret;

	if (outlen + inlen > SNF_GPRAM_SIZE)
		return -EINVAL;

	mtk_snand_mac_reset(snf);

	nfi_write_data(snf, SNF_GPRAM, out, outlen);

	ret = mtk_snand_mac_trigger(snf, outlen, inlen);
	if (ret)
		return ret;

	if (!inlen)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);

	return 0;
}

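/*
 * SPI-NAND feature access: Get Feature is the opcode plus one address byte,
 * returning one data byte; Set Feature is the opcode plus one address byte
 * plus one data byte (0Fh/1Fh on standard parts; the actual opcodes come
 * from mtk-snand-def.h). Both fit trivially in the GPRAM and go through the
 * generic MAC I/O path above.
 */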
static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
{
	uint8_t op[2], val;
	int ret;

	op[0] = SNAND_CMD_GET_FEATURE;
	op[1] = (uint8_t)addr;

	ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
	if (ret)
		return ret;

	return val;
}

int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
{
	uint8_t op[3];

	op[0] = SNAND_CMD_SET_FEATURE;
	op[1] = (uint8_t)addr;
	op[2] = (uint8_t)val;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
{
	int val;
	mtk_snand_time_t time_start, tmo;

	time_start = timer_get_ticks();
	tmo = timer_time_to_tick(wait_us);

	do {
		val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
		if (!(val & SNAND_STATUS_OIP))
			return val & (SNAND_STATUS_ERASE_FAIL |
				      SNAND_STATUS_PROGRAM_FAIL);
	} while (!timer_is_timeout(time_start, tmo));

	return -ETIMEDOUT;
}

int mtk_snand_chip_reset(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_RESET;
	int ret;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0)
		return ret;

	return 0;
}

static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
				    uint8_t set)
{
	int val, newval;
	int ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	newval = (val & (~clr)) | set;

	if (newval == val)
		return 0;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
				    (uint8_t)newval);
	if (ret) {
		snand_log_chip(snf->pdev,
			       "Failed to set configuration feature\n");
		return ret;
	}

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	if (newval != val)
		return -ENOTSUPP;

	return 0;
}

static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable)
		ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
	else
		ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable) {
		ret = mtk_snand_config_feature(snf, 0,
					       SNAND_FEATURE_QUAD_ENABLE);
	} else {
		ret = mtk_snand_config_feature(snf,
					       SNAND_FEATURE_QUAD_ENABLE, 0);
	}

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_unlock(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
	if (ret) {
		snand_log_chip(snf->pdev, "Failed to set protection feature\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_write_enable(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_WRITE_ENABLE;
	int ret, val;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
	if (val < 0)
		return val;

	if (val & SNAND_STATUS_WEL)
		return 0;

	snand_log_chip(snf->pdev, "Failed to send write-enable command\n");

	return -ENOTSUPP;
}

static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	if (!snf->select_die)
		return 0;

	return snf->select_die(snf, dieidx);
}

static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
					     uint64_t addr)
{
	uint32_t dieidx;

	if (!snf->select_die)
		return addr;

	dieidx = addr >> snf->die_shift;

	mtk_snand_select_die(snf, dieidx);

	return addr & snf->die_mask;
}

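/*
 * Some dual-plane SPI-NAND parts expect a plane select bit just above the
 * column address bits of cache reads/program loads, derived from the parity
 * of the block index. Returning (writesize << 1) sets that bit for
 * odd-numbered blocks; on parts without dual-plane addressing the extra
 * column bit is typically a don't-care.
 */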
static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
					    uint32_t page)
{
	uint32_t pages_per_block;

	pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);

	if (page & pages_per_block)
		return 1 << (snf->writesize_shift + 1);

	return 0;
}

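/*
 * Issue a command that takes a 24-bit row (page) address, e.g. 13h (page
 * read to cache), 10h (program execute) or D8h (block erase) on standard
 * parts.
 */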
static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
{
	uint8_t op[4];

	op[0] = cmd;
	op[1] = (page >> 16) & 0xff;
	op[2] = (page >> 8) & 0xff;
	op[3] = page & 0xff;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

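/*
 * Each sector carries fdm_size bytes of FDM (OOB) data held in a pair of
 * NFI registers: FDML holds bytes 0-3 and FDMM holds bytes 4-7, both
 * little-endian. The helpers below convert between that register layout and
 * a flat OOB buffer.
 */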
static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
	uint32_t vall, valm;
	uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->nfi_soc->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->nfi_soc->fdm_size;
	}
}

static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
				     uint32_t sect, uint8_t *oob)
{
	uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint32_t coladdr, raw_offs, offs;
	uint8_t op[4];

	if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
		snand_log_snfi(snf->pdev,
			       "ECC parity size does not fit the GPRAM\n");
		return -ENOTSUPP;
	}

	raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
		   snf->nfi_soc->fdm_size;
	offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;

	/* Column address with plane bit */
	coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);

	op[0] = SNAND_CMD_READ_FROM_CACHE;
	op[1] = (coladdr >> 8) & 0xff;
	op[2] = coladdr & 0xff;
	op[3] = 0;

	return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
}

static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
{
	uint8_t *oob = snf->page_cache + snf->writesize;
	int i, rc, ret = 0, max_bitflips = 0;

	for (i = 0; i < snf->ecc_steps; i++) {
		if (snf->sect_bf[i] >= 0) {
			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];
			continue;
		}

		rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
		if (rc)
			return rc;

		rc = mtk_ecc_fixup_empty_sector(snf, i);
		if (rc < 0) {
			ret = -EBADMSG;

			snand_log_ecc(snf->pdev,
				      "Uncorrectable bitflips in page %u sect %u\n",
				      page, i);
		} else if (rc) {
			snf->sect_bf[i] = rc;

			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];

			snand_log_ecc(snf->pdev,
				      "%u bitflip%s corrected in page %u sect %u\n",
				      rc, rc > 1 ? "s" : "", page, i);
		} else {
			snf->sect_bf[i] = 0;
		}
	}

	return ret ? ret : max_bitflips;
}

static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Command and dummy cycles */
	nfi_write32(snf, SNF_RD_CTL2,
		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
		    (snf->opcode_rfc << DATA_READ_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_RD_CTL3, coladdr);

	/* Set read mode */
	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
		  mode | DATARD_CUSTOM_EN |
		  (snf->nfi_soc->latch_lat << LATCH_LAT_S));

	/* Set bytes to read */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI read prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA read */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map from device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_decoder_start(snf);

	/* Prepare for custom read interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	/* Start DMA read */
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for reading from cache\n");
		goto cleanup;
	}

	/* Wait for BUS_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				  BUS_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup;
	}

	/* Wait for bus becoming idle */
	ret = read32_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask),
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for bus becoming idle\n");
		goto cleanup;
	}

	if (!raw) {
		ret = mtk_ecc_wait_decoder_done(snf);
		if (ret)
			goto cleanup;

		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

		mtk_ecc_check_decode_error(snf);
		mtk_snand_ecc_decoder_stop(snf);

		ret = mtk_snand_check_ecc_result(snf, page);
	}

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, false);

	/* Stop read */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN | LATCH_LAT, 0);

	return ret;
}

static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;

	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			memcpy(eccptr, raw_sector, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
				  void *buf, void *oob, bool raw, bool format)
{
	uint64_t die_addr;
	uint32_t page, dly_ctrl3;
	int ret, retry_cnt = 0;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	dly_ctrl3 = nfi_read32(snf, SNF_DLY_CTL3);

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
		return ret;
	}

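	/*
	 * Read retry with sample-delay calibration: if the transfer comes
	 * back with uncorrectable bitflips, sweep SNF_DLY_CTL3 in steps of
	 * two (up to 16 attempts) and re-read; on success keep the new
	 * delay, otherwise restore the value saved above.
	 */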
retry:
	ret = mtk_snand_read_cache(snf, page, raw);
	if (ret < 0 && ret != -EBADMSG)
		return ret;

	if (ret == -EBADMSG && retry_cnt < 16) {
		nfi_write32(snf, SNF_DLY_CTL3, retry_cnt * 2);
		retry_cnt++;
		goto retry;
	}

	if (retry_cnt) {
		if (ret == -EBADMSG) {
			nfi_write32(snf, SNF_DLY_CTL3, dly_ctrl3);
			snand_log_chip(snf->pdev,
				       "NFI calibration failed. Original sample delay: 0x%x\n",
				       dly_ctrl3);
		} else {
			snand_log_chip(snf->pdev,
				       "NFI calibration passed. New sample delay: 0x%x\n",
				       nfi_read32(snf, SNF_DLY_CTL3));
		}
	}

	if (raw) {
		if (format) {
			mtk_snand_bm_swap_raw(snf);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_from_raw_page(snf, buf, oob);
		} else {
			if (buf)
				memcpy(buf, snf->page_cache, snf->writesize);

			if (oob) {
				memset(oob, 0xff, snf->oobsize);
				memcpy(oob, snf->page_cache + snf->writesize,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		mtk_snand_bm_swap(snf);
		mtk_snand_fdm_bm_swap(snf);

		if (buf)
			memcpy(buf, snf->page_cache, snf->writesize);

		if (oob) {
			memset(oob, 0xff, snf->oobsize);
			memcpy(oob, snf->page_cache + snf->writesize,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}
	}

	return ret;
}

int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
			void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
	uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
	const uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
					<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
					<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
				  bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Write FDM registers if necessary */
	if (!raw)
		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

	/* Command */
	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_PG_CTL2, coladdr);

	/* Set write mode */
	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

	/* Set bytes to write */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI write prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA write */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map to device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_encoder_start(snf);

	/* Prepare for custom write interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	/* Start DMA write */
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for program load\n");
		goto cleanup;
	}

	/* Wait for NFI_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
				  NFI_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for NFI_SEC_CNTR\n");
		goto cleanup;
	}

	if (!raw)
		mtk_snand_ecc_encoder_stop(snf);

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, true);

	/* Stop write */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_to_raw_page(struct mtk_snand *snf,
				  const void *buf, const void *oob,
				  bool empty_ecc)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	const uint8_t *bufptr = buf, *oobptr = oob;
	uint8_t *raw_sector;

	memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			if (empty_ecc)
				memset(raw_sector, 0xff, ecc_bytes);
			else
				memcpy(raw_sector, eccptr, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

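/*
 * An all-0xff page must not be written through the ECC engine: the computed
 * parity bytes would not be 0xff, so a later read of what is logically an
 * erased page would return "corrected" data instead of an empty page. The
 * check below detects this case so the write path can fall back to a raw
 * write with 0xff parity.
 */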
static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
				    const void *oob)
{
	const uint8_t *p = buf;
	uint32_t i, j;

	if (buf) {
		for (i = 0; i < snf->writesize; i++) {
			if (p[i] != 0xff)
				return false;
		}
	}

	if (oob) {
		for (j = 0; j < snf->ecc_steps; j++) {
			p = oob + j * snf->nfi_soc->fdm_size;

			for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
				if (p[i] != 0xff)
					return false;
			}
		}
	}

	return true;
}

static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
				   const void *buf, const void *oob,
				   bool raw, bool format)
{
	uint64_t die_addr;
	bool empty_ecc = false;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
		/*
		 * If the data in the page to be ecc-ed is full 0xff,
		 * change to raw write mode
		 */
		raw = true;
		format = true;

		/* fill ecc parity code region with 0xff */
		empty_ecc = true;
	}

	if (raw) {
		if (format) {
			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_bm_swap_raw(snf);
		} else {
			memset(snf->page_cache, 0xff,
			       snf->writesize + snf->oobsize);

			if (buf)
				memcpy(snf->page_cache, buf, snf->writesize);

			if (oob) {
				memcpy(snf->page_cache + snf->writesize, oob,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
		if (buf)
			memcpy(snf->page_cache, buf, snf->writesize);

		if (oob) {
			memcpy(snf->page_cache + snf->writesize, oob,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}

		mtk_snand_fdm_bm_swap(snf);
		mtk_snand_bm_swap(snf);
	}

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_program_load(snf, page, raw);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Page program command timed out on page %u\n",
			       page);
		return ret;
	}

	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
		snand_log_chip(snf->pdev,
			       "Page program failed on page %u\n", page);
		return -EIO;
	}

	return 0;
}

int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
			 const void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
}

int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
	uint64_t die_addr;
	uint32_t page, block;
	int ret;

	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	die_addr = mtk_snand_select_die_address(snf, addr);
	block = die_addr >> snf->erasesize_shift;
	page = block << (snf->erasesize_shift - snf->writesize_shift);

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Block erase command timed out on block %u\n",
			       block);
		return ret;
	}

	if (ret & SNAND_STATUS_ERASE_FAIL) {
		snand_log_chip(snf->pdev,
			       "Block erase failed on block %u\n", block);
		return -EIO;
	}

	return 0;
}

static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     false);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     true);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_isbad_std(snf, addr);

	return mtk_snand_block_isbad_mtk(snf, addr);
}

static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
{
	/* Standard BBM position */
	memset(snf->buf_cache, 0xff, snf->oobsize);
	snf->buf_cache[0] = 0;

	return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
				       false);
}

static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	/* Write the whole page with zeros */
	memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);

	return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
				       snf->buf_cache + snf->writesize, true,
				       true);
}

int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_markbad_std(snf, addr);

	return mtk_snand_block_markbad_mtk(snf, addr);
}

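/*
 * Auto-OOB layout: the first FDM byte of each sector is reserved for the
 * bad block marker, so only fdm_size - 1 bytes per sector carry user OOB
 * data. fill_oob/transfer_oob pack and unpack user OOB around those
 * reserved bytes and return the number of bytes that did not fit.
 */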
int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
		       const uint8_t *oobbuf, size_t ooblen)
{
	size_t len = ooblen, sect_fdm_len;
	const uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
			   size_t ooblen, const uint8_t *oobraw)
{
	size_t len = ooblen, sect_fdm_len;
	uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				 void *buf, void *oob, size_t ooblen,
				 size_t *actualooblen, bool raw)
{
	int ret, oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_read_page(snf, addr, buf, NULL, raw);

	ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
	if (ret && ret != -EBADMSG) {
		if (actualooblen)
			*actualooblen = 0;
		return ret;
	}

	oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return ret;
}

int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				  const void *buf, const void *oob,
				  size_t ooblen, size_t *actualooblen, bool raw)
{
	int oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_write_page(snf, addr, buf, NULL, raw);

	memset(snf->buf_cache, 0xff, snf->oobsize);
	oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
}

int mtk_snand_get_chip_info(struct mtk_snand *snf,
			    struct mtk_snand_chip_info *info)
{
	if (!snf || !info)
		return -EINVAL;

	info->model = snf->model;
	info->chipsize = snf->size;
	info->blocksize = snf->erasesize;
	info->pagesize = snf->writesize;
	info->sparesize = snf->oobsize;
	info->spare_per_sector = snf->spare_per_sector;
	info->fdm_size = snf->nfi_soc->fdm_size;
	info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
	info->num_sectors = snf->ecc_steps;
	info->sector_size = snf->nfi_soc->sector_size;
	info->ecc_strength = snf->ecc_strength;
	info->ecc_bytes = snf->ecc_bytes;

	return 0;
}

int mtk_snand_irq_process(struct mtk_snand *snf)
{
	uint32_t sta, ien;

	if (!snf)
		return -EINVAL;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return 0;

	nfi_write32(snf, NFI_INTR_EN, 0);
	irq_completion_done(snf->pdev);

	return 1;
}

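/*
 * Pick the largest hardware spare-size setting that still fits within the
 * spare area the chip actually provides per sector. The returned index into
 * spare_sizes[] is what gets programmed into NFI_PAGEFMT.
 */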
static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
{
	uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
	int i, mul = 1;

	/*
	 * If we're using the 1KB sector size, HW will automatically
	 * double the spare size. So we should only use half of the value.
	 */
	if (snf->nfi_soc->sector_size == 1024)
		mul = 2;

	spare_per_step /= mul;

	for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
		if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
			snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
			snf->spare_per_sector *= mul;
			return i;
		}
	}

	snand_log_nfi(snf->pdev,
		      "Page size %u+%u is not supported\n", snf->writesize,
		      snf->oobsize);

	return -1;
}

static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
{
	uint32_t spare_size_shift, pagesize_idx;
	uint32_t sector_size_512;
	int spare_size_idx;

	if (snf->nfi_soc->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (snf->writesize) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	spare_size_idx = mtk_snand_select_spare_per_sector(snf);
	if (unlikely(spare_size_idx < 0))
		return -ENOTSUPP;

	snf->raw_sector_size = snf->nfi_soc->sector_size +
			       snf->spare_per_sector;

	/* Setup page format */
	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
		    (spare_size_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	return 0;
}

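/*
 * Opcode selection: intersect the controller capabilities with what the
 * chip advertises, then use fls() to pick the highest-numbered (fastest)
 * I/O mode left in the combined bitmap.
 */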
static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
						   uint32_t snfi_caps,
						   uint8_t *opcode,
						   uint8_t *dummy,
						   const struct snand_io_cap *op_cap)
{
	uint32_t i, caps;

	caps = snfi_caps & op_cap->caps;

	i = fls(caps);
	if (i > 0) {
		*opcode = op_cap->opcodes[i - 1].opcode;
		if (dummy)
			*dummy = op_cap->opcodes[i - 1].dummy;
		return i - 1;
	}

	return __SNAND_IO_MAX;
}

static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
				       uint32_t snfi_caps,
				       const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
		[SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
		[SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
		[SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
		[SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
				      &snf->dummy_rfc, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for read from cache\n");
		return -ENOTSUPP;
	}

	snf->mode_rfc = rfc_modes[idx];

	if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
				      const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t pl_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = 0,
		[SNAND_IO_1_1_4] = 1,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
				      NULL, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for program load\n");
		return -ENOTSUPP;
	}

	snf->mode_pl = pl_modes[idx];

	if (idx == SNAND_IO_1_1_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_setup(struct mtk_snand *snf,
			   const struct snand_flash_info *snand_info)
{
	const struct snand_mem_org *memorg = &snand_info->memorg;
	uint32_t i, msg_size, snfi_caps;
	int ret;

	/* Calculate flash memory organization */
	snf->model = snand_info->model;
	snf->writesize = memorg->pagesize;
	snf->oobsize = memorg->sparesize;
	snf->erasesize = snf->writesize * memorg->pages_per_block;
	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
	snf->size = snf->die_size * memorg->ndies;
	snf->num_dies = memorg->ndies;

	snf->writesize_mask = snf->writesize - 1;
	snf->erasesize_mask = snf->erasesize - 1;
	snf->die_mask = snf->die_size - 1;

	snf->writesize_shift = ffs(snf->writesize) - 1;
	snf->erasesize_shift = ffs(snf->erasesize) - 1;
	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

	snf->select_die = snand_info->select_die;

	/* Determine opcodes for read from cache/program load */
	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
	if (snf->snfi_quad_spi)
		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
	if (ret)
		return ret;

	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
	if (ret)
		return ret;

	/* ECC and page format */
	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	ret = mtk_snand_pagefmt_setup(snf);
	if (ret)
		return ret;

	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
			    msg_size);
	if (ret)
		return ret;

	nfi_write16(snf, NFI_CNFG, 0);

	/* Tuning options */
	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
	nfi_write32(snf, SNF_DLY_CTL3,
		    (snf->nfi_soc->sample_delay << SFCK_SAM_DLY_S));

	/* Interrupts */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Initialization on all dies */
	for (i = 0; i < snf->num_dies; i++) {
		mtk_snand_select_die(snf, i);

		/* Disable On-Die ECC engine */
		ret = mtk_snand_ondie_ecc_control(snf, false);
		if (ret)
			return ret;

		/* Disable block protection */
		mtk_snand_unlock(snf);

		/* Enable/disable quad-spi */
		mtk_snand_qspi_control(snf, snf->quad_spi_op);
	}

	mtk_snand_select_die(snf, 0);

	return 0;
}

static int mtk_snand_id_probe(struct mtk_snand *snf,
			      const struct snand_flash_info **snand_info)
{
	uint8_t id[4], op[2];
	int ret;

	/* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
	op[0] = SNAND_CMD_READID;
	op[1] = 0;
	ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	/* Read SPI-NAND JEDEC ID, OP + ID */
	op[0] = SNAND_CMD_READID;
	ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	snand_log_chip(snf->pdev,
		       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
		       id[0], id[1], id[2], id[3]);

	return -EINVAL;
}

int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
		   struct mtk_snand **psnf)
{
	const struct snand_flash_info *snand_info;
	uint32_t rawpage_size, sect_bf_size;
	struct mtk_snand tmpsnf, *snf;
	int ret;

	if (!pdata || !psnf)
		return -EINVAL;

	if (pdata->soc >= __SNAND_SOC_MAX) {
		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
			       pdata->soc);
		return -EINVAL;
	}

	/* Dummy instance only for initial reset and id probe */
	tmpsnf.nfi_base = pdata->nfi_base;
	tmpsnf.ecc_base = pdata->ecc_base;
	tmpsnf.soc = pdata->soc;
	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
	tmpsnf.pdev = dev;

	/* Switch to SNFI mode */
	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

	/* Reset SNFI & NFI */
	mtk_snand_mac_reset(&tmpsnf);
	mtk_nfi_reset(&tmpsnf);

	/* Reset SPI-NAND chip */
	ret = mtk_snand_chip_reset(&tmpsnf);
	if (ret) {
		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
		return ret;
	}

	/* Probe SPI-NAND flash by JEDEC ID */
	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
	if (ret)
		return ret;

	rawpage_size = snand_info->memorg.pagesize +
		       snand_info->memorg.sparesize;

	sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
		       sizeof(*snf->sect_bf);

	/* Allocate memory for instance and cache */
	snf = generic_mem_alloc(dev,
				sizeof(*snf) + rawpage_size + sect_bf_size);
	if (!snf) {
		snand_log_chip(dev, "Failed to allocate memory for instance\n");
		return -ENOMEM;
	}

	snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
	snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);

	/* Allocate memory for DMA buffer */
	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
	if (!snf->page_cache) {
		generic_mem_free(dev, snf);
		snand_log_chip(dev,
			       "Failed to allocate memory for DMA buffer\n");
		return -ENOMEM;
	}

	/* Fill up instance */
	snf->pdev = dev;
	snf->nfi_base = pdata->nfi_base;
	snf->ecc_base = pdata->ecc_base;
	snf->soc = pdata->soc;
	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
	snf->snfi_quad_spi = pdata->quad_spi;

	/* Initialize SNFI & ECC engine */
	ret = mtk_snand_setup(snf, snand_info);
	if (ret) {
		dma_mem_free(dev, snf->page_cache);
		generic_mem_free(dev, snf);
		return ret;
	}

	*psnf = snf;

	return 0;
}

int mtk_snand_cleanup(struct mtk_snand *snf)
{
	if (!snf)
		return 0;

	dma_mem_free(snf->pdev, snf->page_cache);
	generic_mem_free(snf->pdev, snf);

	return 0;
}
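
/*
 * Typical call sequence (a sketch only; the platform glue must supply the
 * dev handle, the ioremapped register bases and the dma/irq/timer helpers
 * declared in mtk-snand-def.h; page_buf is a caller-provided buffer of at
 * least info.pagesize bytes):
 *
 *	struct mtk_snand_platdata pdata = {
 *		.nfi_base = nfi_regs,
 *		.ecc_base = ecc_regs,
 *		.soc = SNAND_SOC_MT7986,
 *		.quad_spi = true,
 *	};
 *	struct mtk_snand_chip_info info;
 *	struct mtk_snand *snf;
 *
 *	if (!mtk_snand_init(dev, &pdata, &snf)) {
 *		mtk_snand_get_chip_info(snf, &info);
 *		// read the first page, ECC-corrected, without OOB
 *		mtk_snand_read_page(snf, 0, page_buf, NULL, false);
 *		mtk_snand_cleanup(snf);
 *	}
 */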