// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"

/* NFI registers */
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM GENMASK(28, 24)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define AHB_BUS_BUSY BIT(1)
#define BUS_BUSY BIT(0)
#define NFI_MASTERSTA_MASK_7986 (AHB_BUS_BUSY | BUS_BUSY)

/* SNFI registers */
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

#define SNFI_POLL_INTERVAL 1000000

static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

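/*
 * The array index is the value programmed into the spare-size field of
 * NFI_PAGEFMT, so the entries follow the hardware encoding rather than
 * strict numeric order (62 precedes 61 on purpose).
 */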
static const uint8_t mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
	[SNAND_SOC_MT7622] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = false,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7629] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = true,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7986] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
	},
};

static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
			     uint32_t set)
{
	uint32_t val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

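/*
 * The SNFI GPRAM is accessed through 32-bit registers, while MAC
 * transactions are byte streams. The two helpers below pack/unpack an
 * arbitrary byte buffer into such word accesses, taking care of
 * unaligned start addresses and trailing bytes.
 */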
static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		val |= ((uint32_t)*data++) << (8 * (i % es));

		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}

static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}

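/*
 * Bad block marker (BBM) swapping.
 *
 * The NFI stores a page as consecutive raw sectors, each laid out as
 * <sector data><FDM bytes><ECC parity>. As a result, the byte at
 * physical column `writesize' (the first spare byte, where the factory
 * bad block marker lives) falls inside the last raw sector rather than
 * at the start of the user-visible OOB area. The helpers below swap the
 * marker between these two positions so that it remains visible at the
 * expected offset in both raw and formatted page views.
 */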
static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
	uint8_t tmp = *bm1;
	*bm1 = *bm2;
	*bm2 = tmp;
}

static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
		      snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[snf->writesize]);
}

static void mtk_snand_bm_swap(struct mtk_snand *snf)
{
	uint32_t buf_bbm_pos, fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	buf_bbm_pos = snf->writesize -
		      (snf->ecc_steps - 1) * snf->spare_per_sector;
	fdm_bbm_pos = snf->writesize +
		      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->nfi_soc->sector_size;
	fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
		       snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->writesize;
	fdm_bbm_pos2 = snf->writesize +
		       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
	uint32_t val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "NFI master is still busy after reset\n");
		return ret;
	}

	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	uint32_t val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
		    (10 << CS_DESELECT_CYC_S));

	return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
				 uint32_t inlen)
{
	int ret;
	uint32_t val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev,
			       "Timed out waiting for WIP to clear\n");
	}

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

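/*
 * Execute a generic SPI transaction through the SNFI MAC: the `out'
 * bytes are staged in GPRAM and shifted out, then `inlen' bytes are
 * clocked in and read back from GPRAM at offset `outlen'. The whole
 * transaction must fit into the SNF_GPRAM_SIZE-byte GPRAM.
 */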
int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
		     uint8_t *in, uint32_t inlen)
{
	int ret;

	if (outlen + inlen > SNF_GPRAM_SIZE)
		return -EINVAL;

	mtk_snand_mac_reset(snf);

	nfi_write_data(snf, SNF_GPRAM, out, outlen);

	ret = mtk_snand_mac_trigger(snf, outlen, inlen);
	if (ret)
		return ret;

	if (!inlen)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);

	return 0;
}

static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
{
	uint8_t op[2], val;
	int ret;

	op[0] = SNAND_CMD_GET_FEATURE;
	op[1] = (uint8_t)addr;

	ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
	if (ret)
		return ret;

	return val;
}

int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
{
	uint8_t op[3];

	op[0] = SNAND_CMD_SET_FEATURE;
	op[1] = (uint8_t)addr;
	op[2] = (uint8_t)val;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
{
	int val;
	mtk_snand_time_t time_start, tmo;

	time_start = timer_get_ticks();
	tmo = timer_time_to_tick(wait_us);

	do {
		val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
		if (!(val & SNAND_STATUS_OIP))
			return val & (SNAND_STATUS_ERASE_FAIL |
				      SNAND_STATUS_PROGRAM_FAIL);
	} while (!timer_is_timeout(time_start, tmo));

	return -ETIMEDOUT;
}

int mtk_snand_chip_reset(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_RESET;
	int ret;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0)
		return ret;

	return 0;
}

static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
				    uint8_t set)
{
	int val, newval;
	int ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	newval = (val & (~clr)) | set;

	if (newval == val)
		return 0;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
				    (uint8_t)newval);
	if (ret) {
		snand_log_chip(snf->pdev,
			       "Failed to set configuration feature\n");
		return ret;
	}

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	if (newval != val)
		return -ENOTSUPP;

	return 0;
}

static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable)
		ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
	else
		ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable) {
		ret = mtk_snand_config_feature(snf, 0,
					       SNAND_FEATURE_QUAD_ENABLE);
	} else {
		ret = mtk_snand_config_feature(snf,
					       SNAND_FEATURE_QUAD_ENABLE, 0);
	}

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_unlock(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
	if (ret) {
		snand_log_chip(snf->pdev, "Failed to set protection feature\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_write_enable(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_WRITE_ENABLE;
	int ret, val;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
	if (val < 0)
		return val;

	if (val & SNAND_STATUS_WEL)
		return 0;

	snand_log_chip(snf->pdev, "Failed to send write-enable command\n");

	return -ENOTSUPP;
}

static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	if (!snf->select_die)
		return 0;

	return snf->select_die(snf, dieidx);
}

static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
					     uint64_t addr)
{
	uint32_t dieidx;

	if (!snf->select_die)
		return addr;

	dieidx = addr >> snf->die_shift;

	mtk_snand_select_die(snf, dieidx);

	return addr & snf->die_mask;
}

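/*
 * Compute the plane-select bit for the column address. On dual-plane
 * SPI-NAND chips, odd-numbered blocks live on the second plane, selected
 * by the bit just above the page-size bits of the column address; chips
 * without planes are expected to ignore this bit.
 */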
static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
					    uint32_t page)
{
	uint32_t pages_per_block;

	pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);

	if (page & pages_per_block)
		return 1 << (snf->writesize_shift + 1);

	return 0;
}

static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
{
	uint8_t op[4];

	op[0] = cmd;
	op[1] = (page >> 16) & 0xff;
	op[2] = (page >> 8) & 0xff;
	op[3] = page & 0xff;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

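/*
 * The FDM registers mirror the user OOB bytes of each sector:
 * NFI_FDML(i) holds bytes 0-3 and NFI_FDMM(i) holds bytes 4-7 of
 * sector i. On a formatted read they are filled by hardware and
 * copied out here.
 */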
static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
	uint32_t vall, valm;
	uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->nfi_soc->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->nfi_soc->fdm_size;
	}
}

static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
				     uint32_t sect, uint8_t *oob)
{
	uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint32_t coladdr, raw_offs, offs;
	uint8_t op[4];

	if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
		snand_log_snfi(snf->pdev,
			       "ECC parity size does not fit the GPRAM\n");
		return -ENOTSUPP;
	}

	raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
		   snf->nfi_soc->fdm_size;
	offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;

	/* Column address with plane bit */
	coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);

	op[0] = SNAND_CMD_READ_FROM_CACHE;
	op[1] = (coladdr >> 8) & 0xff;
	op[2] = coladdr & 0xff;
	op[3] = 0;

	return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
}

static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
{
	uint8_t *oob = snf->page_cache + snf->writesize;
	int i, rc, ret = 0, max_bitflips = 0;

	for (i = 0; i < snf->ecc_steps; i++) {
		if (snf->sect_bf[i] >= 0) {
			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];
			continue;
		}

		rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
		if (rc)
			return rc;

		rc = mtk_ecc_fixup_empty_sector(snf, i);
		if (rc < 0) {
			ret = -EBADMSG;

			snand_log_ecc(snf->pdev,
				      "Uncorrectable bitflips in page %u sect %u\n",
				      page, i);
		} else if (rc) {
			snf->sect_bf[i] = rc;

			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];

			snand_log_ecc(snf->pdev,
				      "%u bitflip%s corrected in page %u sect %u\n",
				      rc, rc > 1 ? "s" : "", page, i);
		} else {
			snf->sect_bf[i] = 0;
		}
	}

	return ret ? ret : max_bitflips;
}

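/*
 * Read the page held in the chip's cache register into snf->page_cache
 * via DMA. The SNFI issues the read-from-cache opcode in custom mode;
 * unless `raw' is set, the NFI engine also deinterleaves the raw
 * sectors and runs the hardware ECC decoder. On an ECC-decoded read,
 * returns -EBADMSG if any sector is uncorrectable, or the maximum
 * number of corrected bitflips otherwise.
 */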
static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Command and dummy cycles */
	nfi_write32(snf, SNF_RD_CTL2,
		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
		    (snf->opcode_rfc << DATA_READ_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_RD_CTL3, coladdr);

	/* Set read mode */
	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE, mode | DATARD_CUSTOM_EN);

	/* Set bytes to read */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI read prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA read */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map from device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_decoder_start(snf);

	/* Prepare for custom read interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	/* Start DMA read */
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for the operation to finish */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for reading from cache\n");
		goto cleanup;
	}

	/* Wait for BUS_SEC_CNTR to return the expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				  BUS_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup;
	}

	/* Wait for the bus to become idle */
	ret = read32_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask),
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for the bus to become idle\n");
		goto cleanup;
	}

	if (!raw) {
		ret = mtk_ecc_wait_decoder_done(snf);
		if (ret)
			goto cleanup;

		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

		mtk_ecc_check_decode_error(snf);
		mtk_snand_ecc_decoder_stop(snf);

		ret = mtk_snand_check_ecc_result(snf, page);
	}

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, false);

	/* Stop read */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;

	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			memcpy(eccptr, raw_sector, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
				  void *buf, void *oob, bool raw, bool format)
{
	uint64_t die_addr;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
		return ret;
	}

	ret = mtk_snand_read_cache(snf, page, raw);
	if (ret < 0 && ret != -EBADMSG)
		return ret;

	if (raw) {
		if (format) {
			mtk_snand_bm_swap_raw(snf);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_from_raw_page(snf, buf, oob);
		} else {
			if (buf)
				memcpy(buf, snf->page_cache, snf->writesize);

			if (oob) {
				memset(oob, 0xff, snf->oobsize);
				memcpy(oob, snf->page_cache + snf->writesize,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		mtk_snand_bm_swap(snf);
		mtk_snand_fdm_bm_swap(snf);

		if (buf)
			memcpy(buf, snf->page_cache, snf->writesize);

		if (oob) {
			memset(oob, 0xff, snf->oobsize);
			memcpy(oob, snf->page_cache + snf->writesize,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}
	}

	return ret;
}

int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
			void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
	uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
	const uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
						<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
						<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

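/*
 * Counterpart of mtk_snand_read_cache(): DMA snf->page_cache into the
 * chip's cache register using the program-load opcode, with hardware
 * ECC encoding and page formatting unless `raw' is set. The caller is
 * responsible for the surrounding WRITE_ENABLE and PROGRAM_EXECUTE
 * commands (see mtk_snand_do_write_page()).
 */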
static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
				  bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Write FDM registers if necessary */
	if (!raw)
		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

	/* Command */
	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_PG_CTL2, coladdr);

	/* Set write mode */
	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

	/* Set bytes to write */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI write prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA write */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map to device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_encoder_start(snf);

	/* Prepare for custom write interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	/* Start DMA write */
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for the operation to finish */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for program load\n");
		goto cleanup;
	}

	/* Wait for NFI_SEC_CNTR to return the expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
				  NFI_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for NFI_SEC_CNTR\n");
		goto cleanup;
	}

	if (!raw)
		mtk_snand_ecc_encoder_stop(snf);

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, true);

	/* Stop write */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_to_raw_page(struct mtk_snand *snf,
				  const void *buf, const void *oob,
				  bool empty_ecc)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	const uint8_t *bufptr = buf, *oobptr = oob;
	uint8_t *raw_sector;

	memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			if (empty_ecc)
				memset(raw_sector, 0xff, ecc_bytes);
			else
				memcpy(raw_sector, eccptr, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
				    const void *oob)
{
	const uint8_t *p = buf;
	uint32_t i, j;

	if (buf) {
		for (i = 0; i < snf->writesize; i++) {
			if (p[i] != 0xff)
				return false;
		}
	}

	if (oob) {
		for (j = 0; j < snf->ecc_steps; j++) {
			p = oob + j * snf->nfi_soc->fdm_size;

			for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
				if (p[i] != 0xff)
					return false;
			}
		}
	}

	return true;
}

static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
				   const void *buf, const void *oob,
				   bool raw, bool format)
{
	uint64_t die_addr;
	bool empty_ecc = false;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
		/*
		 * If the data to be ECC-protected is all 0xff,
		 * switch to raw write mode
		 */
		raw = true;
		format = true;

		/* Fill the ECC parity region with 0xff */
		empty_ecc = true;
	}

	if (raw) {
		if (format) {
			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_bm_swap_raw(snf);
		} else {
			memset(snf->page_cache, 0xff,
			       snf->writesize + snf->oobsize);

			if (buf)
				memcpy(snf->page_cache, buf, snf->writesize);

			if (oob) {
				memcpy(snf->page_cache + snf->writesize, oob,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
		if (buf)
			memcpy(snf->page_cache, buf, snf->writesize);

		if (oob) {
			memcpy(snf->page_cache + snf->writesize, oob,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}

		mtk_snand_fdm_bm_swap(snf);
		mtk_snand_bm_swap(snf);
	}

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_program_load(snf, page, raw);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Page program command timed out on page %u\n",
			       page);
		return ret;
	}

	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
		snand_log_chip(snf->pdev,
			       "Page program failed on page %u\n", page);
		return -EIO;
	}

	return 0;
}

int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
			 const void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
}

int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
	uint64_t die_addr;
	uint32_t page, block;
	int ret;

	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	die_addr = mtk_snand_select_die_address(snf, addr);
	block = die_addr >> snf->erasesize_shift;
	page = block << (snf->erasesize_shift - snf->writesize_shift);

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Block erase command timed out on block %u\n",
			       block);
		return ret;
	}

	if (ret & SNAND_STATUS_ERASE_FAIL) {
		snand_log_chip(snf->pdev,
			       "Block erase failed on block %u\n", block);
		return -EIO;
	}

	return 0;
}

static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     false);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     true);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_isbad_std(snf, addr);

	return mtk_snand_block_isbad_mtk(snf, addr);
}

static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
{
	/* Standard BBM position */
	memset(snf->buf_cache, 0xff, snf->oobsize);
	snf->buf_cache[0] = 0;

	return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
				       false);
}

static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	/* Write the whole page with zeros */
	memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);

	return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
				       snf->buf_cache + snf->writesize, true,
				       true);
}

int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_markbad_std(snf, addr);

	return mtk_snand_block_markbad_mtk(snf, addr);
}

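/*
 * Pack/unpack user OOB data against the raw FDM layout. Byte 0 of each
 * sector's FDM area is reserved for the bad block marker, so only
 * fdm_size - 1 bytes per sector carry user data. Both helpers return
 * the number of requested bytes that could not be transferred.
 */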
int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
		       const uint8_t *oobbuf, size_t ooblen)
{
	size_t len = ooblen, sect_fdm_len;
	const uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
			   size_t ooblen, const uint8_t *oobraw)
{
	size_t len = ooblen, sect_fdm_len;
	uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				 void *buf, void *oob, size_t ooblen,
				 size_t *actualooblen, bool raw)
{
	int ret, oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_read_page(snf, addr, buf, NULL, raw);

	ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
	if (ret && ret != -EBADMSG) {
		if (actualooblen)
			*actualooblen = 0;
		return ret;
	}

	oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return ret;
}

int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				  const void *buf, const void *oob,
				  size_t ooblen, size_t *actualooblen, bool raw)
{
	int oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_write_page(snf, addr, buf, NULL, raw);

	memset(snf->buf_cache, 0xff, snf->oobsize);
	oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
}

int mtk_snand_get_chip_info(struct mtk_snand *snf,
			    struct mtk_snand_chip_info *info)
{
	if (!snf || !info)
		return -EINVAL;

	info->model = snf->model;
	info->chipsize = snf->size;
	info->blocksize = snf->erasesize;
	info->pagesize = snf->writesize;
	info->sparesize = snf->oobsize;
	info->spare_per_sector = snf->spare_per_sector;
	info->fdm_size = snf->nfi_soc->fdm_size;
	info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
	info->num_sectors = snf->ecc_steps;
	info->sector_size = snf->nfi_soc->sector_size;
	info->ecc_strength = snf->ecc_strength;
	info->ecc_bytes = snf->ecc_bytes;

	return 0;
}

int mtk_snand_irq_process(struct mtk_snand *snf)
{
	uint32_t sta, ien;

	if (!snf)
		return -EINVAL;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return 0;

	nfi_write32(snf, NFI_INTR_EN, 0);
	irq_completion_done(snf->pdev);

	return 1;
}

static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
{
	uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
	int i, mul = 1;

	/*
	 * If we're using the 1KB sector size, HW will automatically
	 * double the spare size. So we should only use half of the value.
	 */
	if (snf->nfi_soc->sector_size == 1024)
		mul = 2;

	spare_per_step /= mul;

	for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
		if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
			snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
			snf->spare_per_sector *= mul;
			return i;
		}
	}

	snand_log_nfi(snf->pdev,
		      "Page size %u+%u is not supported\n", snf->writesize,
		      snf->oobsize);

	return -1;
}

static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
{
	uint32_t spare_size_shift, pagesize_idx;
	uint32_t sector_size_512;
	int spare_size_idx;

	if (snf->nfi_soc->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (snf->writesize) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	spare_size_idx = mtk_snand_select_spare_per_sector(snf);
	if (unlikely(spare_size_idx < 0))
		return -ENOTSUPP;

	snf->raw_sector_size = snf->nfi_soc->sector_size +
			       snf->spare_per_sector;

	/* Setup page format */
	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
		    (spare_size_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	return 0;
}

static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
						   uint32_t snfi_caps,
						   uint8_t *opcode,
						   uint8_t *dummy,
						   const struct snand_io_cap *op_cap)
{
	uint32_t i, caps;

	caps = snfi_caps & op_cap->caps;

	i = fls(caps);
	if (i > 0) {
		*opcode = op_cap->opcodes[i - 1].opcode;
		if (dummy)
			*dummy = op_cap->opcodes[i - 1].dummy;
		return i - 1;
	}

	return __SNAND_IO_MAX;
}

static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
				       uint32_t snfi_caps,
				       const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
		[SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
		[SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
		[SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
		[SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
				      &snf->dummy_rfc, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for read from cache\n");
		return -ENOTSUPP;
	}

	snf->mode_rfc = rfc_modes[idx];

	if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
				      const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t pl_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = 0,
		[SNAND_IO_1_1_4] = 1,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
				      NULL, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for program load\n");
		return -ENOTSUPP;
	}

	snf->mode_pl = pl_modes[idx];

	if (idx == SNAND_IO_1_1_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_setup(struct mtk_snand *snf,
			   const struct snand_flash_info *snand_info)
{
	const struct snand_mem_org *memorg = &snand_info->memorg;
	uint32_t i, msg_size, snfi_caps;
	int ret;

	/* Calculate flash memory organization */
	snf->model = snand_info->model;
	snf->writesize = memorg->pagesize;
	snf->oobsize = memorg->sparesize;
	snf->erasesize = snf->writesize * memorg->pages_per_block;
	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
	snf->size = snf->die_size * memorg->ndies;
	snf->num_dies = memorg->ndies;

	snf->writesize_mask = snf->writesize - 1;
	snf->erasesize_mask = snf->erasesize - 1;
	snf->die_mask = snf->die_size - 1;

	snf->writesize_shift = ffs(snf->writesize) - 1;
	snf->erasesize_shift = ffs(snf->erasesize) - 1;
	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

	snf->select_die = snand_info->select_die;

	/* Determine opcodes for read from cache/program load */
	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
	if (snf->snfi_quad_spi)
		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
	if (ret)
		return ret;

	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
	if (ret)
		return ret;

	/* ECC and page format */
	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	ret = mtk_snand_pagefmt_setup(snf);
	if (ret)
		return ret;

	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
			    msg_size);
	if (ret)
		return ret;

	nfi_write16(snf, NFI_CNFG, 0);

	/* Tuning options */
	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
	nfi_write32(snf, SNF_DLY_CTL3, (40 << SFCK_SAM_DLY_S));

	/* Interrupts */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Initialization on all dies */
	for (i = 0; i < snf->num_dies; i++) {
		mtk_snand_select_die(snf, i);

		/* Disable On-Die ECC engine */
		ret = mtk_snand_ondie_ecc_control(snf, false);
		if (ret)
			return ret;

		/* Disable block protection */
		mtk_snand_unlock(snf);

		/* Enable/disable quad-spi */
		mtk_snand_qspi_control(snf, snf->quad_spi_op);
	}

	mtk_snand_select_die(snf, 0);

	return 0;
}

static int mtk_snand_id_probe(struct mtk_snand *snf,
			      const struct snand_flash_info **snand_info)
{
	uint8_t id[4], op[2];
	int ret;

	/* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
	op[0] = SNAND_CMD_READID;
	op[1] = 0;
	ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	/* Read SPI-NAND JEDEC ID, OP + ID */
	op[0] = SNAND_CMD_READID;
	ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	snand_log_chip(snf->pdev,
		       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
		       id[0], id[1], id[2], id[3]);

	return -EINVAL;
}

int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
		   struct mtk_snand **psnf)
{
	const struct snand_flash_info *snand_info;
	uint32_t rawpage_size, sect_bf_size;
	struct mtk_snand tmpsnf, *snf;
	int ret;

	if (!pdata || !psnf)
		return -EINVAL;

	if (pdata->soc >= __SNAND_SOC_MAX) {
		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
			       pdata->soc);
		return -EINVAL;
	}

	/* Dummy instance only for initial reset and id probe */
	tmpsnf.nfi_base = pdata->nfi_base;
	tmpsnf.ecc_base = pdata->ecc_base;
	tmpsnf.soc = pdata->soc;
	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
	tmpsnf.pdev = dev;

	/* Switch to SNFI mode */
	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

	/* Reset SNFI & NFI */
	mtk_snand_mac_reset(&tmpsnf);
	mtk_nfi_reset(&tmpsnf);

	/* Reset SPI-NAND chip */
	ret = mtk_snand_chip_reset(&tmpsnf);
	if (ret) {
		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
		return ret;
	}

	/* Probe SPI-NAND flash by JEDEC ID */
	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
	if (ret)
		return ret;

	rawpage_size = snand_info->memorg.pagesize +
		       snand_info->memorg.sparesize;

	sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
		       sizeof(*snf->sect_bf);

	/* Allocate memory for instance and cache */
	snf = generic_mem_alloc(dev,
				sizeof(*snf) + rawpage_size + sect_bf_size);
	if (!snf) {
		snand_log_chip(dev, "Failed to allocate memory for instance\n");
		return -ENOMEM;
	}

	snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
	snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);

	/* Allocate memory for DMA buffer */
	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
	if (!snf->page_cache) {
		generic_mem_free(dev, snf);
		snand_log_chip(dev,
			       "Failed to allocate memory for DMA buffer\n");
		return -ENOMEM;
	}

	/* Fill up instance */
	snf->pdev = dev;
	snf->nfi_base = pdata->nfi_base;
	snf->ecc_base = pdata->ecc_base;
	snf->soc = pdata->soc;
	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
	snf->snfi_quad_spi = pdata->quad_spi;

	/* Initialize SNFI & ECC engine */
	ret = mtk_snand_setup(snf, snand_info);
	if (ret) {
		dma_mem_free(dev, snf->page_cache);
		generic_mem_free(dev, snf);
		return ret;
	}

	*psnf = snf;

	return 0;
}

int mtk_snand_cleanup(struct mtk_snand *snf)
{
	if (!snf)
		return 0;

	dma_mem_free(snf->pdev, snf->page_cache);
	generic_mem_free(snf->pdev, snf);

	return 0;
}
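
/*
 * Example (sketch): a typical call sequence for this API. The platform
 * glue (the `dev' handle, the register mappings and the helpers declared
 * in mtk-snand-def.h) is assumed to be provided elsewhere; `nfi_regs',
 * `ecc_regs' and `buf' below are placeholders.
 *
 *	struct mtk_snand_platdata pdata = {
 *		.nfi_base = nfi_regs,
 *		.ecc_base = ecc_regs,
 *		.soc = SNAND_SOC_MT7986,
 *		.quad_spi = true,
 *	};
 *	struct mtk_snand *snf;
 *	struct mtk_snand_chip_info info;
 *
 *	if (!mtk_snand_init(dev, &pdata, &snf)) {
 *		mtk_snand_get_chip_info(snf, &info);
 *		// buf must hold info.pagesize bytes
 *		mtk_snand_read_page(snf, 0, buf, NULL, false);
 *		mtk_snand_cleanup(snf);
 *	}
 */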