// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"

/* NFI registers */
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM GENMASK(28, 24)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define AHB_BUS_BUSY BIT(1)
#define BUS_BUSY BIT(0)
#define NFI_MASTERSTA_MASK_7986 (AHB_BUS_BUSY | BUS_BUSY)

/* SNFI registers */
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

#define SNFI_POLL_INTERVAL 1000000

static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const uint8_t mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
	[SNAND_SOC_MT7622] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = false,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7629] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = true,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7986] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
	},
};

static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
			     uint32_t set)
{
	uint32_t val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		val |= ((uint32_t)*data++) << (8 * (i % es));

		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}

static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}
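
/*
 * Worked example of the byte packing above (illustrative values only):
 * writing the five bytes { 0x9f, 0x00, 0xaa, 0xbb, 0xcc } to SNF_GPRAM
 * (0x800) emits two little-endian 32-bit writes, 0xbbaa009f to +0x800 and
 * 0x000000cc to +0x804. nfi_read_data() performs the inverse unpacking,
 * so callers may treat the GPRAM window as a plain byte buffer.
 */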

static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
	uint8_t tmp = *bm1;
	*bm1 = *bm2;
	*bm2 = tmp;
}
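
/*
 * Why the bad-block-marker (BBM) swaps below exist: on flash, each sector
 * is stored as [ data | FDM (OOB) | ECC parity ], so the canonical BBM
 * position (the first spare byte, i.e. flash offset == writesize) actually
 * falls inside the data region of the last sector. Judging from the offset
 * arithmetic, the helpers below exchange that data byte with a sector's
 * first FDM byte so the factory marker stays readable at the standard
 * position in both the raw and the formatted page views.
 */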

static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
		      snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[snf->writesize]);
}

static void mtk_snand_bm_swap(struct mtk_snand *snf)
{
	uint32_t buf_bbm_pos, fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	buf_bbm_pos = snf->writesize -
		      (snf->ecc_steps - 1) * snf->spare_per_sector;
	fdm_bbm_pos = snf->writesize +
		      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->nfi_soc->sector_size;
	fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
		       snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->writesize;
	fdm_bbm_pos2 = snf->writesize +
		       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
	uint32_t val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "NFI master is still busy after reset\n");
		return ret;
	}

	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	uint32_t val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
		    (10 << CS_DESELECT_CYC_S));

	return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
				 uint32_t inlen)
{
	int ret;
	uint32_t val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev,
			       "Timed out waiting for WIP cleared\n");
	}

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
		     uint8_t *in, uint32_t inlen)
{
	int ret;

	if (outlen + inlen > SNF_GPRAM_SIZE)
		return -EINVAL;

	mtk_snand_mac_reset(snf);

	nfi_write_data(snf, SNF_GPRAM, out, outlen);

	ret = mtk_snand_mac_trigger(snf, outlen, inlen);
	if (ret)
		return ret;

	if (!inlen)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);

	return 0;
}
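
/*
 * Usage sketch for mtk_snand_mac_io() (hypothetical call, mirroring
 * mtk_snand_get_feature() below): a 2-byte command/address phase followed
 * by a 1-byte response, all staged in GPRAM. outlen + inlen must fit in
 * the 0xa0-byte GPRAM window.
 *
 *	uint8_t op[2] = { SNAND_CMD_GET_FEATURE, SNAND_FEATURE_STATUS_ADDR };
 *	uint8_t status;
 *
 *	ret = mtk_snand_mac_io(snf, op, sizeof(op), &status, 1);
 */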

static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
{
	uint8_t op[2], val;
	int ret;

	op[0] = SNAND_CMD_GET_FEATURE;
	op[1] = (uint8_t)addr;

	ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
	if (ret)
		return ret;

	return val;
}

int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
{
	uint8_t op[3];

	op[0] = SNAND_CMD_SET_FEATURE;
	op[1] = (uint8_t)addr;
	op[2] = (uint8_t)val;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
{
	int val;
	mtk_snand_time_t time_start, tmo;

	time_start = timer_get_ticks();
	tmo = timer_time_to_tick(wait_us);

	do {
		val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
		if (!(val & SNAND_STATUS_OIP))
			return val & (SNAND_STATUS_ERASE_FAIL |
				      SNAND_STATUS_PROGRAM_FAIL);
	} while (!timer_is_timeout(time_start, tmo));

	return -ETIMEDOUT;
}

int mtk_snand_chip_reset(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_RESET;
	int ret;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0)
		return ret;

	return 0;
}

static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
				    uint8_t set)
{
	int val, newval;
	int ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	newval = (val & (~clr)) | set;

	if (newval == val)
		return 0;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
				    (uint8_t)newval);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to set configuration feature\n");
		return ret;
	}

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	if (newval != val)
		return -ENOTSUPP;

	return 0;
}

static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable)
		ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
	else
		ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable) {
		ret = mtk_snand_config_feature(snf, 0,
					       SNAND_FEATURE_QUAD_ENABLE);
	} else {
		ret = mtk_snand_config_feature(snf,
					       SNAND_FEATURE_QUAD_ENABLE, 0);
	}

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_unlock(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
	if (ret) {
		snand_log_chip(snf->pdev, "Failed to set protection feature\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_write_enable(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_WRITE_ENABLE;
	int ret, val;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
	if (val < 0)
		return val;

	if (val & SNAND_STATUS_WEL)
		return 0;

	snand_log_chip(snf->pdev, "Failed to send write-enable command\n");

	return -ENOTSUPP;
}

static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	if (!snf->select_die)
		return 0;

	return snf->select_die(snf, dieidx);
}

static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
					     uint64_t addr)
{
	uint32_t dieidx;

	if (!snf->select_die)
		return addr;

	dieidx = addr >> snf->die_shift;

	mtk_snand_select_die(snf, dieidx);

	return addr & snf->die_mask;
}

static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
					    uint32_t page)
{
	uint32_t pages_per_block;

	pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);

	if (page & pages_per_block)
		return 1 << (snf->writesize_shift + 1);

	return 0;
}
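
/*
 * Example (hypothetical geometry): with 2 KiB pages (writesize_shift = 11)
 * and 64 pages per block, pages_per_block = 64, so bit 6 of the page number
 * alternates between adjacent blocks. For odd-numbered blocks the returned
 * plane bit is 1 << 12, which gets OR-ed into the column address just above
 * the in-page offset bits.
 */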

static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
{
	uint8_t op[4];

	op[0] = cmd;
	op[1] = (page >> 16) & 0xff;
	op[2] = (page >> 8) & 0xff;
	op[3] = page & 0xff;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
	uint32_t vall, valm;
	uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->nfi_soc->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->nfi_soc->fdm_size;
	}
}
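
/*
 * FDM register layout note: each sector owns one register pair, with
 * NFI_FDML(i) holding OOB bytes 0-3 and NFI_FDMM(i) bytes 4-7, both
 * little-endian. With fdm_size = 8, for example, OOB byte 5 of sector 2
 * lives in bits 15:8 of NFI_FDMM(2).
 */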

static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
				     uint32_t sect, uint8_t *oob)
{
	uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint32_t coladdr, raw_offs, offs;
	uint8_t op[4];

	if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
		snand_log_snfi(snf->pdev,
			       "ECC parity size does not fit the GPRAM\n");
		return -ENOTSUPP;
	}

	raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
		   snf->nfi_soc->fdm_size;
	offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;

	/* Column address with plane bit */
	coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);

	op[0] = SNAND_CMD_READ_FROM_CACHE;
	op[1] = (coladdr >> 8) & 0xff;
	op[2] = coladdr & 0xff;
	op[3] = 0;

	return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
}

static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
{
	uint8_t *oob = snf->page_cache + snf->writesize;
	int i, rc, ret = 0, max_bitflips = 0;

	for (i = 0; i < snf->ecc_steps; i++) {
		if (snf->sect_bf[i] >= 0) {
			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];
			continue;
		}

		rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
		if (rc)
			return rc;

		rc = mtk_ecc_fixup_empty_sector(snf, i);
		if (rc < 0) {
			ret = -EBADMSG;

			snand_log_ecc(snf->pdev,
				      "Uncorrectable bitflips in page %u sect %u\n",
				      page, i);
		} else if (rc) {
			snf->sect_bf[i] = rc;

			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];

			snand_log_ecc(snf->pdev,
				      "%u bitflip%s corrected in page %u sect %u\n",
				      rc, rc > 1 ? "s" : "", page, i);
		} else {
			snf->sect_bf[i] = 0;
		}
	}

	return ret ? ret : max_bitflips;
}
static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Command and dummy cycles */
	nfi_write32(snf, SNF_RD_CTL2,
		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
		    (snf->opcode_rfc << DATA_READ_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_RD_CTL3, coladdr);

	/* Set read mode */
	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE, mode | DATARD_CUSTOM_EN);

	/* Set bytes to read */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI read prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA read */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map from device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_decoder_start(snf);

	/* Prepare for custom read interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	/* Start DMA read */
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for reading from cache\n");
		goto cleanup;
	}

	/* Wait for BUS_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				  BUS_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup;
	}

	/* Wait for bus becoming idle */
	ret = read32_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask),
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for bus becoming idle\n");
		goto cleanup;
	}

	if (!raw) {
		ret = mtk_ecc_wait_decoder_done(snf);
		if (ret)
			goto cleanup;

		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

		mtk_ecc_check_decode_error(snf);
		mtk_snand_ecc_decoder_stop(snf);

		ret = mtk_snand_check_ecc_result(snf, page);
	}

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, false);

	/* Stop read */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;

	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			memcpy(eccptr, raw_sector, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
				  void *buf, void *oob, bool raw, bool format)
{
	uint64_t die_addr;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
		return ret;
	}

	ret = mtk_snand_read_cache(snf, page, raw);
	if (ret < 0 && ret != -EBADMSG)
		return ret;

	if (raw) {
		if (format) {
			mtk_snand_bm_swap_raw(snf);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_from_raw_page(snf, buf, oob);
		} else {
			if (buf)
				memcpy(buf, snf->page_cache, snf->writesize);

			if (oob) {
				memset(oob, 0xff, snf->oobsize);
				memcpy(oob, snf->page_cache + snf->writesize,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		mtk_snand_bm_swap(snf);
		mtk_snand_fdm_bm_swap(snf);

		if (buf)
			memcpy(buf, snf->page_cache, snf->writesize);

		if (oob) {
			memset(oob, 0xff, snf->oobsize);
			memcpy(oob, snf->page_cache + snf->writesize,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}
	}

	return ret;
}

int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
			void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
	uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
	const uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
					<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
					<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

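/*
 * Load one page from snf->page_cache into the chip's cache register through
 * the SNFI custom-program DMA path; structurally the mirror image of
 * mtk_snand_read_cache() above. FDM bytes are written to the per-sector
 * registers first (non-raw mode), then the load opcode, column address,
 * I/O mode and byte count are programmed before the DMA transfer is
 * started and CUS_PG_DONE is awaited.
 */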
static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
				  bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Write FDM registers if necessary */
	if (!raw)
		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

	/* Command */
	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_PG_CTL2, coladdr);

	/* Set write mode */
	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

	/* Set bytes to write */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI write prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA write */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map to device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_encoder_start(snf);

	/* Prepare for custom write interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	/* Start DMA write */
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for program load\n");
		goto cleanup;
	}

	/* Wait for BUS_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				  BUS_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup;
	}

	if (!raw)
		mtk_snand_ecc_encoder_stop(snf);

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, true);

	/* Stop write */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_to_raw_page(struct mtk_snand *snf,
				  const void *buf, const void *oob,
				  bool empty_ecc)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	const uint8_t *bufptr = buf, *oobptr = oob;
	uint8_t *raw_sector;

	memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			if (empty_ecc)
				memset(raw_sector, 0xff, ecc_bytes);
			else
				memcpy(raw_sector, eccptr, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
				    const void *oob)
{
	const uint8_t *p = buf;
	uint32_t i, j;

	if (buf) {
		for (i = 0; i < snf->writesize; i++) {
			if (p[i] != 0xff)
				return false;
		}
	}

	if (oob) {
		for (j = 0; j < snf->ecc_steps; j++) {
			p = oob + j * snf->nfi_soc->fdm_size;

			for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
				if (p[i] != 0xff)
					return false;
			}
		}
	}

	return true;
}

static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
				   const void *buf, const void *oob,
				   bool raw, bool format)
{
	uint64_t die_addr;
	bool empty_ecc = false;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
		/*
		 * If the page to be ECC-ed consists entirely of 0xff,
		 * switch to raw write mode
		 */
		raw = true;
		format = true;

		/* Fill the ECC parity code region with 0xff */
		empty_ecc = true;
	}

	if (raw) {
		if (format) {
			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_bm_swap_raw(snf);
		} else {
			memset(snf->page_cache, 0xff,
			       snf->writesize + snf->oobsize);

			if (buf)
				memcpy(snf->page_cache, buf, snf->writesize);

			if (oob) {
				memcpy(snf->page_cache + snf->writesize, oob,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
		if (buf)
			memcpy(snf->page_cache, buf, snf->writesize);

		if (oob) {
			memcpy(snf->page_cache + snf->writesize, oob,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}

		mtk_snand_fdm_bm_swap(snf);
		mtk_snand_bm_swap(snf);
	}

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_program_load(snf, page, raw);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Page program command timed out on page %u\n",
			       page);
		return ret;
	}

	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
		snand_log_chip(snf->pdev,
			       "Page program failed on page %u\n", page);
		return -EIO;
	}

	return 0;
}

int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
			 const void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
}

int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
	uint64_t die_addr;
	uint32_t page, block;
	int ret;

	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	die_addr = mtk_snand_select_die_address(snf, addr);
	block = die_addr >> snf->erasesize_shift;
	page = block << (snf->erasesize_shift - snf->writesize_shift);

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Block erase command timed out on block %u\n",
			       block);
		return ret;
	}

	if (ret & SNAND_STATUS_ERASE_FAIL) {
		snand_log_chip(snf->pdev,
			       "Block erase failed on block %u\n", block);
		return -EIO;
	}

	return 0;
}

static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     false);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     true);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_isbad_std(snf, addr);

	return mtk_snand_block_isbad_mtk(snf, addr);
}

static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
{
	/* Standard BBM position */
	memset(snf->buf_cache, 0xff, snf->oobsize);
	snf->buf_cache[0] = 0;

	return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
				       false);
}

static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	/* Write the whole page with zeros */
	memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);

	return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
				       snf->buf_cache + snf->writesize, true,
				       true);
}

int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_markbad_std(snf, addr);

	return mtk_snand_block_markbad_mtk(snf, addr);
}

int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
		       const uint8_t *oobbuf, size_t ooblen)
{
	size_t len = ooblen, sect_fdm_len;
	const uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}
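
/*
 * Note: byte 0 of each sector's FDM region is reserved for the bad block
 * marker, which is why mtk_snand_fill_oob() above and
 * mtk_snand_transfer_oob() below copy from/to offset 1 and expose only
 * fdm_size - 1 bytes per sector. Both return the number of OOB bytes that
 * could not be transferred.
 */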

int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
			   size_t ooblen, const uint8_t *oobraw)
{
	size_t len = ooblen, sect_fdm_len;
	uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				 void *buf, void *oob, size_t ooblen,
				 size_t *actualooblen, bool raw)
{
	int ret, oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_read_page(snf, addr, buf, NULL, raw);

	ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
	if (ret && ret != -EBADMSG) {
		if (actualooblen)
			*actualooblen = 0;
		return ret;
	}

	oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return ret;
}

int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				  const void *buf, const void *oob,
				  size_t ooblen, size_t *actualooblen, bool raw)
{
	int oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_write_page(snf, addr, buf, NULL, raw);

	memset(snf->buf_cache, 0xff, snf->oobsize);
	oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
}

int mtk_snand_get_chip_info(struct mtk_snand *snf,
			    struct mtk_snand_chip_info *info)
{
	if (!snf || !info)
		return -EINVAL;

	info->model = snf->model;
	info->chipsize = snf->size;
	info->blocksize = snf->erasesize;
	info->pagesize = snf->writesize;
	info->sparesize = snf->oobsize;
	info->spare_per_sector = snf->spare_per_sector;
	info->fdm_size = snf->nfi_soc->fdm_size;
	info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
	info->num_sectors = snf->ecc_steps;
	info->sector_size = snf->nfi_soc->sector_size;
	info->ecc_strength = snf->ecc_strength;
	info->ecc_bytes = snf->ecc_bytes;

	return 0;
}

int mtk_snand_irq_process(struct mtk_snand *snf)
{
	uint32_t sta, ien;

	if (!snf)
		return -EINVAL;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return 0;

	nfi_write32(snf, NFI_INTR_EN, 0);
	irq_completion_done(snf->pdev);

	return 1;
}

static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
{
	uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
	int i, mul = 1;

	/*
	 * If we're using the 1KB sector size, HW will automatically
	 * double the spare size. So we should only use half of the value.
	 */
	if (snf->nfi_soc->sector_size == 1024)
		mul = 2;

	spare_per_step /= mul;

	for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
		if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
			snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
			snf->spare_per_sector *= mul;
			return i;
		}
	}

	snand_log_nfi(snf->pdev,
		      "Page size %u+%u is not supported\n", snf->writesize,
		      snf->oobsize);

	return -1;
}
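
/*
 * Worked example (hypothetical chips): a 2048+64 page on MT7622 (512-byte
 * sectors) gives ecc_steps = 4 and spare_per_step = 16, so the largest
 * table entry <= 16 is chosen and spare_per_sector = 16. A 2048+128 page
 * on MT7986 (1024-byte sectors) gives ecc_steps = 2 and, after halving for
 * the doubled spare size, spare_per_step = 32, so spare_per_sector becomes
 * 32 * 2 = 64.
 */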

static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
{
	uint32_t spare_size_shift, pagesize_idx;
	uint32_t sector_size_512;
	int spare_size_idx;

	if (snf->nfi_soc->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (snf->writesize) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	spare_size_idx = mtk_snand_select_spare_per_sector(snf);
	if (unlikely(spare_size_idx < 0))
		return -ENOTSUPP;

	snf->raw_sector_size = snf->nfi_soc->sector_size +
			       snf->spare_per_sector;

	/* Setup page format */
	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
		    (spare_size_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	return 0;
}

static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
					uint32_t snfi_caps, uint8_t *opcode,
					uint8_t *dummy,
					const struct snand_io_cap *op_cap)
{
	uint32_t i, caps;

	caps = snfi_caps & op_cap->caps;

	i = fls(caps);
	if (i > 0) {
		*opcode = op_cap->opcodes[i - 1].opcode;
		if (dummy)
			*dummy = op_cap->opcodes[i - 1].dummy;
		return i - 1;
	}

	return __SNAND_IO_MAX;
}
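
/*
 * The snand_flash_io enum appears to be ordered from slowest (x1) to
 * fastest (quad) I/O, so taking fls() of the intersected capability mask
 * selects the fastest mode supported by both the controller and the chip.
 */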

static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
				       uint32_t snfi_caps,
				       const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
		[SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
		[SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
		[SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
		[SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
				      &snf->dummy_rfc, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for read from cache\n");
		return -ENOTSUPP;
	}

	snf->mode_rfc = rfc_modes[idx];

	if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
				      const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t pl_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = 0,
		[SNAND_IO_1_1_4] = 1,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
				      NULL, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for program load\n");
		return -ENOTSUPP;
	}

	snf->mode_pl = pl_modes[idx];

	if (idx == SNAND_IO_1_1_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_setup(struct mtk_snand *snf,
			   const struct snand_flash_info *snand_info)
{
	const struct snand_mem_org *memorg = &snand_info->memorg;
	uint32_t i, msg_size, snfi_caps;
	int ret;

	/* Calculate flash memory organization */
	snf->model = snand_info->model;
	snf->writesize = memorg->pagesize;
	snf->oobsize = memorg->sparesize;
	snf->erasesize = snf->writesize * memorg->pages_per_block;
	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
	snf->size = snf->die_size * memorg->ndies;
	snf->num_dies = memorg->ndies;

	snf->writesize_mask = snf->writesize - 1;
	snf->erasesize_mask = snf->erasesize - 1;
	snf->die_mask = snf->die_size - 1;

	snf->writesize_shift = ffs(snf->writesize) - 1;
	snf->erasesize_shift = ffs(snf->erasesize) - 1;
	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

	snf->select_die = snand_info->select_die;

	/* Determine opcodes for read from cache/program load */
	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
	if (snf->snfi_quad_spi)
		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
	if (ret)
		return ret;

	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
	if (ret)
		return ret;

	/* ECC and page format */
	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	ret = mtk_snand_pagefmt_setup(snf);
	if (ret)
		return ret;

	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
			    msg_size);
	if (ret)
		return ret;

	nfi_write16(snf, NFI_CNFG, 0);

	/* Tuning options */
	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
	nfi_write32(snf, SNF_DLY_CTL3, (40 << SFCK_SAM_DLY_S));

	/* Interrupts */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Initialization on all dies */
	for (i = 0; i < snf->num_dies; i++) {
		mtk_snand_select_die(snf, i);

		/* Disable On-Die ECC engine */
		ret = mtk_snand_ondie_ecc_control(snf, false);
		if (ret)
			return ret;

		/* Disable block protection */
		mtk_snand_unlock(snf);

		/* Enable/disable quad-spi */
		mtk_snand_qspi_control(snf, snf->quad_spi_op);
	}

	mtk_snand_select_die(snf, 0);

	return 0;
}

static int mtk_snand_id_probe(struct mtk_snand *snf,
			      const struct snand_flash_info **snand_info)
{
	uint8_t id[4], op[2];
	int ret;

	/* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
	op[0] = SNAND_CMD_READID;
	op[1] = 0;
	ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	/* Read SPI-NAND JEDEC ID, OP + ID */
	op[0] = SNAND_CMD_READID;
	ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	snand_log_chip(snf->pdev,
		       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
		       id[0], id[1], id[2], id[3]);

	return -EINVAL;
}

int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
		   struct mtk_snand **psnf)
{
	const struct snand_flash_info *snand_info;
	uint32_t rawpage_size, sect_bf_size;
	struct mtk_snand tmpsnf, *snf;
	int ret;

	if (!pdata || !psnf)
		return -EINVAL;

	if (pdata->soc >= __SNAND_SOC_MAX) {
		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
			       pdata->soc);
		return -EINVAL;
	}

	/* Dummy instance only for initial reset and id probe */
	tmpsnf.nfi_base = pdata->nfi_base;
	tmpsnf.ecc_base = pdata->ecc_base;
	tmpsnf.soc = pdata->soc;
	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
	tmpsnf.pdev = dev;

	/* Switch to SNFI mode */
	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

	/* Reset SNFI & NFI */
	mtk_snand_mac_reset(&tmpsnf);
	mtk_nfi_reset(&tmpsnf);

	/* Reset SPI-NAND chip */
	ret = mtk_snand_chip_reset(&tmpsnf);
	if (ret) {
		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
		return ret;
	}

	/* Probe SPI-NAND flash by JEDEC ID */
	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
	if (ret)
		return ret;

	rawpage_size = snand_info->memorg.pagesize +
		       snand_info->memorg.sparesize;

	sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
		       sizeof(*snf->sect_bf);

	/* Allocate memory for instance and cache */
	snf = generic_mem_alloc(dev,
				sizeof(*snf) + rawpage_size + sect_bf_size);
	if (!snf) {
		snand_log_chip(dev, "Failed to allocate memory for instance\n");
		return -ENOMEM;
	}

	snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
	snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);

	/* Allocate memory for DMA buffer */
	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
	if (!snf->page_cache) {
		generic_mem_free(dev, snf);
		snand_log_chip(dev,
			       "Failed to allocate memory for DMA buffer\n");
		return -ENOMEM;
	}

	/* Fill up instance */
	snf->pdev = dev;
	snf->nfi_base = pdata->nfi_base;
	snf->ecc_base = pdata->ecc_base;
	snf->soc = pdata->soc;
	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
	snf->snfi_quad_spi = pdata->quad_spi;

	/* Initialize SNFI & ECC engine */
	ret = mtk_snand_setup(snf, snand_info);
	if (ret) {
		dma_mem_free(dev, snf->page_cache);
		generic_mem_free(dev, snf);
		return ret;
	}

	*psnf = snf;

	return 0;
}
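
/*
 * Typical bring-up sequence (hypothetical platform glue; pdata must be
 * filled in by board code with the NFI/ECC register bases and SoC type):
 *
 *	struct mtk_snand *snf;
 *	struct mtk_snand_chip_info info;
 *
 *	ret = mtk_snand_init(dev, &pdata, &snf);
 *	if (!ret) {
 *		mtk_snand_get_chip_info(snf, &info);
 *		ret = mtk_snand_read_page(snf, 0, buf, NULL, false);
 *		...
 *		mtk_snand_cleanup(snf);
 *	}
 */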

int mtk_snand_cleanup(struct mtk_snand *snf)
{
	if (!snf)
		return 0;

	dma_mem_free(snf->pdev, snf->page_cache);
	generic_mem_free(snf->pdev, snf);

	return 0;
}