// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"

/* NFI registers */
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM GENMASK(28, 24)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define AHB_BUS_BUSY BIT(1)
#define BUS_BUSY BIT(0)
#define NFI_MASTERSTA_MASK_7986 (AHB_BUS_BUSY | BUS_BUSY)

/* SNFI registers */
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define LATCH_LAT_S 8
#define LATCH_LAT GENMASK(9, 8)
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

#define SNFI_POLL_INTERVAL 1000000

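/*
 * Spare-size tables: each entry maps an index of the NFI_PAGEFMT spare-size
 * field to the number of spare bytes per 512-byte sector step. The ordering
 * apparently follows the hardware register encoding (hence the
 * non-monotonic 52, 62, 61, 63 run for MT7986), so these arrays must not
 * be re-sorted. For 1KB-sector SoCs the hardware doubles the selected
 * value (see mtk_snand_select_spare_per_sector()).
 */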
static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const uint8_t mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
	[SNAND_SOC_MT7622] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = false,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7629] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = true,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7986] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes),
		.latch_lat = 1,
		.sample_delay = 8
	},
};

static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
			     uint32_t set)
{
	uint32_t val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

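/*
 * GPRAM helpers: byte buffers are packed into / unpacked from the 32-bit
 * GPRAM words little-endian, so transfers need not be word-aligned or
 * word-sized. For example, writing out[] = { 0x0f, 0xa0 } at an aligned
 * offset stores the word 0x0000a00f.
 */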
static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		val |= ((uint32_t)*data++) << (8 * (i % es));

		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}

static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}

static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
	uint8_t tmp = *bm1;
	*bm1 = *bm2;
	*bm2 = tmp;
}

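/*
 * Bad block marker (BBM) swapping.
 *
 * In the NFI raw page layout, sectors are stored back-to-back as
 * [data | FDM | ECC parity], so the byte at flash offset 'writesize'
 * (the standard BBM position) falls inside the last sector's main data.
 * The *_bm_swap helpers exchange that byte with the first FDM byte of
 * the last sector, keeping the factory marker readable at the standard
 * position on flash. The *_fdm_bm_swap helpers additionally move the
 * marker between the last sector's FDM and FDM byte 0 so it shows up at
 * the start of the user-visible OOB area.
 */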
static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
		      snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[snf->writesize]);
}

static void mtk_snand_bm_swap(struct mtk_snand *snf)
{
	uint32_t buf_bbm_pos, fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	buf_bbm_pos = snf->writesize -
		      (snf->ecc_steps - 1) * snf->spare_per_sector;
	fdm_bbm_pos = snf->writesize +
		      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->nfi_soc->sector_size;
	fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
		       snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->writesize;
	fdm_bbm_pos2 = snf->writesize +
		       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
	uint32_t val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "NFI master is still busy after reset\n");
		return ret;
	}

	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	uint32_t val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
		    (10 << CS_DESELECT_CYC_S) |
		    (snf->nfi_soc->latch_lat << LATCH_LAT_S));

	return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
				 uint32_t inlen)
{
	int ret;
	uint32_t val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev,
			       "Timed out waiting for WIP cleared\n");
	}

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

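/*
 * Software (MAC mode) SPI transaction through the GPRAM: the 'out' bytes
 * are clocked out first, then 'inlen' bytes are clocked in, so the whole
 * transaction must fit in the 160-byte GPRAM. E.g. a get-feature
 * transaction (see mtk_snand_get_feature() below) sends a two-byte
 * opcode + address and clocks one status byte back.
 */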
int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
		     uint8_t *in, uint32_t inlen)
{
	int ret;

	if (outlen + inlen > SNF_GPRAM_SIZE)
		return -EINVAL;

	mtk_snand_mac_reset(snf);

	nfi_write_data(snf, SNF_GPRAM, out, outlen);

	ret = mtk_snand_mac_trigger(snf, outlen, inlen);
	if (ret)
		return ret;

	if (!inlen)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);

	return 0;
}

static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
{
	uint8_t op[2], val;
	int ret;

	op[0] = SNAND_CMD_GET_FEATURE;
	op[1] = (uint8_t)addr;

	ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
	if (ret)
		return ret;

	return val;
}

int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
{
	uint8_t op[3];

	op[0] = SNAND_CMD_SET_FEATURE;
	op[1] = (uint8_t)addr;
	op[2] = (uint8_t)val;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
{
	int val;
	mtk_snand_time_t time_start, tmo;

	time_start = timer_get_ticks();
	tmo = timer_time_to_tick(wait_us);

	do {
		val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
		if (!(val & SNAND_STATUS_OIP))
			return val & (SNAND_STATUS_ERASE_FAIL |
				      SNAND_STATUS_PROGRAM_FAIL);
	} while (!timer_is_timeout(time_start, tmo));

	return -ETIMEDOUT;
}

int mtk_snand_chip_reset(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_RESET;
	int ret;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0)
		return ret;

	return 0;
}

static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
				    uint8_t set)
{
	int val, newval;
	int ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	newval = (val & (~clr)) | set;

	if (newval == val)
		return 0;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
				    (uint8_t)newval);
	if (ret) {
		snand_log_chip(snf->pdev,
			       "Failed to set configuration feature\n");
		return ret;
	}

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	if (newval != val)
		return -ENOTSUPP;

	return 0;
}

static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable)
		ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
	else
		ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable) {
		ret = mtk_snand_config_feature(snf, 0,
					       SNAND_FEATURE_QUAD_ENABLE);
	} else {
		ret = mtk_snand_config_feature(snf,
					       SNAND_FEATURE_QUAD_ENABLE, 0);
	}

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_unlock(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
	if (ret) {
		snand_log_chip(snf->pdev, "Failed to set protection feature\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_write_enable(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_WRITE_ENABLE;
	int ret, val;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
	if (val < 0)
		return val;

	if (val & SNAND_STATUS_WEL)
		return 0;

	snand_log_chip(snf->pdev, "Failed to send write-enable command\n");

	return -ENOTSUPP;
}

static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	if (!snf->select_die)
		return 0;

	return snf->select_die(snf, dieidx);
}

static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
					     uint64_t addr)
{
	uint32_t dieidx;

	if (!snf->select_die)
		return addr;

	dieidx = addr >> snf->die_shift;

	mtk_snand_select_die(snf, dieidx);

	return addr & snf->die_mask;
}

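/*
 * Dual-plane addressing: on parts where consecutive blocks alternate
 * between planes, the plane must be selected via a high column-address
 * bit. Odd-numbered blocks (page & pages_per_block) set that bit; the
 * exact position (writesize_shift + 1) is presumably dictated by the
 * supported flash parts.
 */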
static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
					    uint32_t page)
{
	uint32_t pages_per_block;

	pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);

	if (page & pages_per_block)
		return 1 << (snf->writesize_shift + 1);

	return 0;
}

static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
{
	uint8_t op[4];

	op[0] = cmd;
	op[1] = (page >> 16) & 0xff;
	op[2] = (page >> 8) & 0xff;
	op[3] = page & 0xff;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
	uint32_t vall, valm;
	uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->nfi_soc->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->nfi_soc->fdm_size;
	}
}

static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
				     uint32_t sect, uint8_t *oob)
{
	uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint32_t coladdr, raw_offs, offs;
	uint8_t op[4];

	if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
		snand_log_snfi(snf->pdev,
			       "ECC parity size does not fit the GPRAM\n");
		return -ENOTSUPP;
	}

	raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
		   snf->nfi_soc->fdm_size;
	offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;

	/* Column address with plane bit */
	coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);

	op[0] = SNAND_CMD_READ_FROM_CACHE;
	op[1] = (coladdr >> 8) & 0xff;
	op[2] = coladdr & 0xff;
	op[3] = 0;

	return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
}

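/*
 * Post-decode fixup: sectors flagged uncorrectable by the ECC engine
 * (sect_bf[i] < 0) may simply be erased sectors with a few bitflips.
 * Re-read the sector's raw parity bytes and let
 * mtk_ecc_fixup_empty_sector() decide: a negative result means genuinely
 * uncorrectable, a positive result is the bitflip count of an otherwise
 * empty sector, and zero means clean.
 */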
static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
{
	uint8_t *oob = snf->page_cache + snf->writesize;
	int i, rc, ret = 0, max_bitflips = 0;

	for (i = 0; i < snf->ecc_steps; i++) {
		if (snf->sect_bf[i] >= 0) {
			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];
			continue;
		}

		rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
		if (rc)
			return rc;

		rc = mtk_ecc_fixup_empty_sector(snf, i);
		if (rc < 0) {
			ret = -EBADMSG;

			snand_log_ecc(snf->pdev,
				      "Uncorrectable bitflips in page %u sect %u\n",
				      page, i);
		} else if (rc) {
			snf->sect_bf[i] = rc;

			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];

			snand_log_ecc(snf->pdev,
				      "%u bitflip%s corrected in page %u sect %u\n",
				      rc, rc > 1 ? "s" : "", page, i);
		} else {
			snf->sect_bf[i] = 0;
		}
	}

	return ret ? ret : max_bitflips;
}

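/*
 * Read the page currently held in the chip's cache register into
 * page_cache via DMA. In non-raw mode the NFI auto-format engine splits
 * each sector into data + FDM (latched in the NFI_FDM registers) and the
 * hardware ECC decoder checks the parity; in raw mode the full
 * [data | FDM | ECC] stream is stored as-is.
 */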
static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Command and dummy cycles */
	nfi_write32(snf, SNF_RD_CTL2,
		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
		    (snf->opcode_rfc << DATA_READ_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_RD_CTL3, coladdr);

	/* Set read mode */
	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
		  mode | DATARD_CUSTOM_EN |
		  (snf->nfi_soc->latch_lat << LATCH_LAT_S));

	/* Set bytes to read */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI read prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA read */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map from device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_decoder_start(snf);

	/* Prepare for custom read interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	/* Start DMA read */
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for reading from cache\n");
		goto cleanup;
	}

	/* Wait for BUS_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				  BUS_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup;
	}

	/* Wait for bus becoming idle */
	ret = read32_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask),
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for bus becoming idle\n");
		goto cleanup;
	}

	if (!raw) {
		ret = mtk_ecc_wait_decoder_done(snf);
		if (ret)
			goto cleanup;

		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

		mtk_ecc_check_decode_error(snf);
		mtk_snand_ecc_decoder_stop(snf);

		ret = mtk_snand_check_ecc_result(snf, page);
	}

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, false);

	/* Stop read */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN | LATCH_LAT, 0);

	return ret;
}

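/*
 * Convert a raw page image into the formatted (buf, oob) view:
 *
 *   page_cache (raw):  [ sect0 data | sect0 FDM | sect0 ECC | sect1 ... ]
 *   buf:               sector data concatenated
 *   oob:               all FDM bytes first, then all ECC parity bytes
 */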
static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;

	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			memcpy(eccptr, raw_sector, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
				  void *buf, void *oob, bool raw, bool format)
{
	uint64_t die_addr;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
		return ret;
	}

	ret = mtk_snand_read_cache(snf, page, raw);
	if (ret < 0 && ret != -EBADMSG)
		return ret;

	if (raw) {
		if (format) {
			mtk_snand_bm_swap_raw(snf);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_from_raw_page(snf, buf, oob);
		} else {
			if (buf)
				memcpy(buf, snf->page_cache, snf->writesize);

			if (oob) {
				memset(oob, 0xff, snf->oobsize);
				memcpy(oob, snf->page_cache + snf->writesize,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		mtk_snand_bm_swap(snf);
		mtk_snand_fdm_bm_swap(snf);

		if (buf)
			memcpy(buf, snf->page_cache, snf->writesize);

		if (oob) {
			memset(oob, 0xff, snf->oobsize);
			memcpy(oob, snf->page_cache + snf->writesize,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}
	}

	return ret;
}

int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
			void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
	uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
	const uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
						<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
						<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

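/*
 * Mirror image of mtk_snand_read_cache(): DMA the page_cache into the
 * chip's cache register via a custom program-load. In non-raw mode the
 * FDM bytes are preloaded into the NFI_FDM registers and the hardware
 * ECC encoder appends the parity on the fly.
 */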
static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
				  bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Write FDM registers if necessary */
	if (!raw)
		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

	/* Command */
	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_PG_CTL2, coladdr);

	/* Set write mode */
	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

	/* Set bytes to write */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI write prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA write */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map to device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_encoder_start(snf);

	/* Prepare for custom write interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	/* Start DMA write */
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for program load\n");
		goto cleanup;
	}

	/* Wait for NFI_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
				  NFI_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for NFI_SEC_CNTR\n");
		goto cleanup;
	}

	if (!raw)
		mtk_snand_ecc_encoder_stop(snf);

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, true);

	/* Stop write */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_to_raw_page(struct mtk_snand *snf,
				  const void *buf, const void *oob,
				  bool empty_ecc)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	const uint8_t *bufptr = buf, *oobptr = oob;
	uint8_t *raw_sector;

	memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			if (empty_ecc)
				memset(raw_sector, 0xff, ecc_bytes);
			else
				memcpy(raw_sector, eccptr, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
				    const void *oob)
{
	const uint8_t *p = buf;
	uint32_t i, j;

	if (buf) {
		for (i = 0; i < snf->writesize; i++) {
			if (p[i] != 0xff)
				return false;
		}
	}

	if (oob) {
		for (j = 0; j < snf->ecc_steps; j++) {
			p = oob + j * snf->nfi_soc->fdm_size;

			for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
				if (p[i] != 0xff)
					return false;
			}
		}
	}

	return true;
}

static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
				   const void *buf, const void *oob,
				   bool raw, bool format)
{
	uint64_t die_addr;
	bool empty_ecc = false;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
		/*
		 * If the data in the page to be ecc-ed is full 0xff,
		 * change to raw write mode
		 */
		raw = true;
		format = true;

		/* fill ecc parity code region with 0xff */
		empty_ecc = true;
	}

	if (raw) {
		if (format) {
			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_bm_swap_raw(snf);
		} else {
			memset(snf->page_cache, 0xff,
			       snf->writesize + snf->oobsize);

			if (buf)
				memcpy(snf->page_cache, buf, snf->writesize);

			if (oob) {
				memcpy(snf->page_cache + snf->writesize, oob,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
		if (buf)
			memcpy(snf->page_cache, buf, snf->writesize);

		if (oob) {
			memcpy(snf->page_cache + snf->writesize, oob,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}

		mtk_snand_fdm_bm_swap(snf);
		mtk_snand_bm_swap(snf);
	}

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_program_load(snf, page, raw);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Page program command timed out on page %u\n",
			       page);
		return ret;
	}

	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
		snand_log_chip(snf->pdev,
			       "Page program failed on page %u\n", page);
		return -EIO;
	}

	return 0;
}

int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
			 const void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
}

int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
	uint64_t die_addr;
	uint32_t page, block;
	int ret;

	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	die_addr = mtk_snand_select_die_address(snf, addr);
	block = die_addr >> snf->erasesize_shift;
	page = block << (snf->erasesize_shift - snf->writesize_shift);

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Block erase command timed out on block %u\n",
			       block);
		return ret;
	}

	if (ret & SNAND_STATUS_ERASE_FAIL) {
		snand_log_chip(snf->pdev,
			       "Block erase failed on block %u\n", block);
		return -EIO;
	}

	return 0;
}

static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     false);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     true);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_isbad_std(snf, addr);

	return mtk_snand_block_isbad_mtk(snf, addr);
}

static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
{
	/* Standard BBM position */
	memset(snf->buf_cache, 0xff, snf->oobsize);
	snf->buf_cache[0] = 0;

	return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
				       false);
}

static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	/* Write the whole page with zeros */
	memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);

	return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
				       snf->buf_cache + snf->writesize, true,
				       true);
}

int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_markbad_std(snf, addr);

	return mtk_snand_block_markbad_mtk(snf, addr);
}

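/*
 * User OOB <-> FDM mapping: byte 0 of each sector's FDM is reserved for
 * the bad block marker, so only fdm_size - 1 bytes per sector carry user
 * OOB data. With fdm_size = 8 and 4 sectors, at most 28 user OOB bytes
 * fit; both helpers return the number of bytes that did not fit.
 */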
int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
		       const uint8_t *oobbuf, size_t ooblen)
{
	size_t len = ooblen, sect_fdm_len;
	const uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
			   size_t ooblen, const uint8_t *oobraw)
{
	size_t len = ooblen, sect_fdm_len;
	uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				 void *buf, void *oob, size_t ooblen,
				 size_t *actualooblen, bool raw)
{
	int ret, oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_read_page(snf, addr, buf, NULL, raw);

	ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
	if (ret && ret != -EBADMSG) {
		if (actualooblen)
			*actualooblen = 0;
		return ret;
	}

	oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return ret;
}

int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				  const void *buf, const void *oob,
				  size_t ooblen, size_t *actualooblen, bool raw)
{
	int oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_write_page(snf, addr, buf, NULL, raw);

	memset(snf->buf_cache, 0xff, snf->oobsize);
	oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
}

int mtk_snand_get_chip_info(struct mtk_snand *snf,
			    struct mtk_snand_chip_info *info)
{
	if (!snf || !info)
		return -EINVAL;

	info->model = snf->model;
	info->chipsize = snf->size;
	info->blocksize = snf->erasesize;
	info->pagesize = snf->writesize;
	info->sparesize = snf->oobsize;
	info->spare_per_sector = snf->spare_per_sector;
	info->fdm_size = snf->nfi_soc->fdm_size;
	info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
	info->num_sectors = snf->ecc_steps;
	info->sector_size = snf->nfi_soc->sector_size;
	info->ecc_strength = snf->ecc_strength;
	info->ecc_bytes = snf->ecc_bytes;

	return 0;
}

int mtk_snand_irq_process(struct mtk_snand *snf)
{
	uint32_t sta, ien;

	if (!snf)
		return -EINVAL;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return 0;

	nfi_write32(snf, NFI_INTR_EN, 0);
	irq_completion_done(snf->pdev);

	return 1;
}

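/*
 * Worked example: a 2048+64 page on a 512-byte-sector SoC gives
 * ecc_steps = 4 and spare_per_step = 64 / 4 = 16, so index 0 (16 bytes)
 * is selected. On MT7986 (1KB sectors) the table is matched against half
 * the per-step spare and the result doubled, since the hardware derives
 * the 1KB spare size from the 512-byte encoding.
 */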
static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
{
	uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
	int i, mul = 1;

	/*
	 * If we're using the 1KB sector size, HW will automatically
	 * double the spare size. So we should only use half of the value.
	 */
	if (snf->nfi_soc->sector_size == 1024)
		mul = 2;

	spare_per_step /= mul;

	for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
		if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
			snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
			snf->spare_per_sector *= mul;
			return i;
		}
	}

	snand_log_nfi(snf->pdev,
		      "Page size %u+%u is not supported\n", snf->writesize,
		      snf->oobsize);

	return -1;
}

static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
{
	int spare_size_idx;
	uint32_t spare_size_shift, pagesize_idx;
	uint32_t sector_size_512;

	if (snf->nfi_soc->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (snf->writesize) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	spare_size_idx = mtk_snand_select_spare_per_sector(snf);
	if (unlikely(spare_size_idx < 0))
		return -ENOTSUPP;

	snf->raw_sector_size = snf->nfi_soc->sector_size +
			       snf->spare_per_sector;

	/* Setup page format */
	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
		    (spare_size_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	return 0;
}

static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
						   uint32_t snfi_caps,
						   uint8_t *opcode,
						   uint8_t *dummy,
						   const struct snand_io_cap *op_cap)
{
	uint32_t i, caps;

	caps = snfi_caps & op_cap->caps;

	i = fls(caps);
	if (i > 0) {
		*opcode = op_cap->opcodes[i - 1].opcode;
		if (dummy)
			*dummy = op_cap->opcodes[i - 1].dummy;
		return i - 1;
	}

	return __SNAND_IO_MAX;
}

static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
				       uint32_t snfi_caps,
				       const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
		[SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
		[SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
		[SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
		[SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
				      &snf->dummy_rfc, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for read from cache\n");
		return -ENOTSUPP;
	}

	snf->mode_rfc = rfc_modes[idx];

	if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
				      const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t pl_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = 0,
		[SNAND_IO_1_1_4] = 1,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
				      NULL, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for program load\n");
		return -ENOTSUPP;
	}

	snf->mode_pl = pl_modes[idx];

	if (idx == SNAND_IO_1_1_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_setup(struct mtk_snand *snf,
			   const struct snand_flash_info *snand_info)
{
	const struct snand_mem_org *memorg = &snand_info->memorg;
	uint32_t i, msg_size, snfi_caps;
	int ret;

	/* Calculate flash memory organization */
	snf->model = snand_info->model;
	snf->writesize = memorg->pagesize;
	snf->oobsize = memorg->sparesize;
	snf->erasesize = snf->writesize * memorg->pages_per_block;
	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
	snf->size = snf->die_size * memorg->ndies;
	snf->num_dies = memorg->ndies;

	snf->writesize_mask = snf->writesize - 1;
	snf->erasesize_mask = snf->erasesize - 1;
	snf->die_mask = snf->die_size - 1;

	snf->writesize_shift = ffs(snf->writesize) - 1;
	snf->erasesize_shift = ffs(snf->erasesize) - 1;
	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

	snf->select_die = snand_info->select_die;

	/* Determine opcodes for read from cache/program load */
	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
	if (snf->snfi_quad_spi)
		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
	if (ret)
		return ret;

	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
	if (ret)
		return ret;

	/* ECC and page format */
	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	ret = mtk_snand_pagefmt_setup(snf);
	if (ret)
		return ret;

	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
			    msg_size);
	if (ret)
		return ret;

	nfi_write16(snf, NFI_CNFG, 0);

	/* Tuning options */
	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
	nfi_write32(snf, SNF_DLY_CTL3,
		    (snf->nfi_soc->sample_delay << SFCK_SAM_DLY_S));

	/* Interrupts */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Initialization on all dies */
	for (i = 0; i < snf->num_dies; i++) {
		mtk_snand_select_die(snf, i);

		/* Disable On-Die ECC engine */
		ret = mtk_snand_ondie_ecc_control(snf, false);
		if (ret)
			return ret;

		/* Disable block protection */
		mtk_snand_unlock(snf);

		/* Enable/disable quad-spi */
		mtk_snand_qspi_control(snf, snf->quad_spi_op);
	}

	mtk_snand_select_die(snf, 0);

	return 0;
}

static int mtk_snand_id_probe(struct mtk_snand *snf,
			      const struct snand_flash_info **snand_info)
{
	uint8_t id[4], op[2];
	int ret;

	/* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
	op[0] = SNAND_CMD_READID;
	op[1] = 0;
	ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	/* Read SPI-NAND JEDEC ID, OP + ID */
	op[0] = SNAND_CMD_READID;
	ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	snand_log_chip(snf->pdev,
		       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
		       id[0], id[1], id[2], id[3]);

	return -EINVAL;
}

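/*
 * Typical bring-up sequence (sketch; field values are platform-specific
 * and error handling is omitted):
 *
 *	struct mtk_snand_platdata pdata = {
 *		.nfi_base = <NFI MMIO base>,
 *		.ecc_base = <ECC MMIO base>,
 *		.soc = SNAND_SOC_MT7986,
 *		.quad_spi = true,
 *	};
 *	struct mtk_snand *snf;
 *
 *	mtk_snand_init(dev, &pdata, &snf);
 *	mtk_snand_read_page(snf, 0, buf, NULL, false);
 *	mtk_snand_cleanup(snf);
 */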
int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
		   struct mtk_snand **psnf)
{
	const struct snand_flash_info *snand_info;
	uint32_t rawpage_size, sect_bf_size;
	struct mtk_snand tmpsnf, *snf;
	int ret;

	if (!pdata || !psnf)
		return -EINVAL;

	if (pdata->soc >= __SNAND_SOC_MAX) {
		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
			       pdata->soc);
		return -EINVAL;
	}

	/* Dummy instance only for initial reset and id probe */
	tmpsnf.nfi_base = pdata->nfi_base;
	tmpsnf.ecc_base = pdata->ecc_base;
	tmpsnf.soc = pdata->soc;
	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
	tmpsnf.pdev = dev;

	/* Switch to SNFI mode */
	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

	/* Reset SNFI & NFI */
	mtk_snand_mac_reset(&tmpsnf);
	mtk_nfi_reset(&tmpsnf);

	/* Reset SPI-NAND chip */
	ret = mtk_snand_chip_reset(&tmpsnf);
	if (ret) {
		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
		return ret;
	}

	/* Probe SPI-NAND flash by JEDEC ID */
	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
	if (ret)
		return ret;

	rawpage_size = snand_info->memorg.pagesize +
		       snand_info->memorg.sparesize;

	sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
		       sizeof(*snf->sect_bf);

	/* Allocate memory for instance and cache */
	snf = generic_mem_alloc(dev,
				sizeof(*snf) + rawpage_size + sect_bf_size);
	if (!snf) {
		snand_log_chip(dev, "Failed to allocate memory for instance\n");
		return -ENOMEM;
	}

	snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
	snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);

	/* Allocate memory for DMA buffer */
	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
	if (!snf->page_cache) {
		generic_mem_free(dev, snf);
		snand_log_chip(dev,
			       "Failed to allocate memory for DMA buffer\n");
		return -ENOMEM;
	}

	/* Fill up instance */
	snf->pdev = dev;
	snf->nfi_base = pdata->nfi_base;
	snf->ecc_base = pdata->ecc_base;
	snf->soc = pdata->soc;
	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
	snf->snfi_quad_spi = pdata->quad_spi;

	/* Initialize SNFI & ECC engine */
	ret = mtk_snand_setup(snf, snand_info);
	if (ret) {
		dma_mem_free(dev, snf->page_cache);
		generic_mem_free(dev, snf);
		return ret;
	}

	*psnf = snf;

	return 0;
}

int mtk_snand_cleanup(struct mtk_snand *snf)
{
	if (!snf)
		return 0;

	dma_mem_free(snf->pdev, snf->page_cache);
	generic_mem_free(snf->pdev, snf);

	return 0;
}