// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"

/* NFI registers */
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM GENMASK(28, 24)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_STRADDR 0x080

#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define AHB_BUS_BUSY BIT(1)
#define BUS_BUSY BIT(0)
#define NFI_MASTERSTA_MASK_7986 (AHB_BUS_BUSY | BUS_BUSY)

/* SNFI registers */
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

#define SNFI_POLL_INTERVAL 1000000

static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const uint8_t mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
	[SNAND_SOC_MT7622] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = false,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7629] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = true,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7986] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
	},
};

static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
			     uint32_t set)
{
	uint32_t val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

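/*
 * Byte-wise access helpers for the SNFI GPRAM, which (judging from the
 * code below) is only word-addressable: outgoing bytes are coalesced into
 * aligned 32-bit stores, and incoming bytes are extracted from aligned
 * 32-bit loads.
 */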
static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		val |= ((uint32_t)*data++) << (8 * (i % es));

		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}

static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}

static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
	uint8_t tmp = *bm1;
	*bm1 = *bm2;
	*bm2 = tmp;
}

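/*
 * Bad block marker (BBM) swapping. On-flash, the factory marker lives in
 * the first spare byte of the page, but the NFI stores pages as
 * interleaved sectors (data | FDM | ECC per sector), so that offset ends
 * up inside the last sector's payload. The helpers below swap the byte at
 * the canonical BBM offset with the first FDM byte of the last sector so
 * the marker stays accessible in both raw and formatted views.
 */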
static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
		      snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[snf->writesize]);
}

static void mtk_snand_bm_swap(struct mtk_snand *snf)
{
	uint32_t buf_bbm_pos, fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	buf_bbm_pos = snf->writesize -
		      (snf->ecc_steps - 1) * snf->spare_per_sector;
	fdm_bbm_pos = snf->writesize +
		      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->nfi_soc->sector_size;
	fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
		       snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->writesize;
	fdm_bbm_pos2 = snf->writesize +
		       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
	uint32_t val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "NFI master is still busy after reset\n");
		return ret;
	}

	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	uint32_t val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
		    (10 << CS_DESELECT_CYC_S));

	return ret;
}

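/*
 * Run one MAC-mode (PIO) transfer: program the output/input byte counts,
 * pulse SF_TRIG, wait for WIP_READY and then for WIP to clear, and
 * finally release the MAC engine.
 */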
static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
				 uint32_t inlen)
{
	int ret;
	uint32_t val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev,
			       "Timed out waiting for WIP cleared\n");
	}

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

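/*
 * Generic SPI transfer through the MAC: outgoing bytes are staged in the
 * GPRAM and response bytes are read back from the GPRAM right after
 * them, so the combined length must fit in SNF_GPRAM_SIZE.
 */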
int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
		     uint8_t *in, uint32_t inlen)
{
	int ret;

	if (outlen + inlen > SNF_GPRAM_SIZE)
		return -EINVAL;

	mtk_snand_mac_reset(snf);

	nfi_write_data(snf, SNF_GPRAM, out, outlen);

	ret = mtk_snand_mac_trigger(snf, outlen, inlen);
	if (ret)
		return ret;

	if (!inlen)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);

	return 0;
}

static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
{
	uint8_t op[2], val;
	int ret;

	op[0] = SNAND_CMD_GET_FEATURE;
	op[1] = (uint8_t)addr;

	ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
	if (ret)
		return ret;

	return val;
}

int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
{
	uint8_t op[3];

	op[0] = SNAND_CMD_SET_FEATURE;
	op[1] = (uint8_t)addr;
	op[2] = (uint8_t)val;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

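/*
 * Poll the chip status register until OIP (operation in progress)
 * clears. Returns the erase/program failure bits on completion, or
 * -ETIMEDOUT.
 */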
static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
{
	int val;
	mtk_snand_time_t time_start, tmo;

	time_start = timer_get_ticks();
	tmo = timer_time_to_tick(wait_us);

	do {
		val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
		if (!(val & SNAND_STATUS_OIP))
			return val & (SNAND_STATUS_ERASE_FAIL |
				      SNAND_STATUS_PROGRAM_FAIL);
	} while (!timer_is_timeout(time_start, tmo));

	return -ETIMEDOUT;
}

int mtk_snand_chip_reset(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_RESET;
	int ret;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0)
		return ret;

	return 0;
}

static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
				    uint8_t set)
{
	int val, newval;
	int ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	newval = (val & (~clr)) | set;

	if (newval == val)
		return 0;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
				    (uint8_t)newval);
	if (ret) {
		snand_log_chip(snf->pdev,
			       "Failed to set configuration feature\n");
		return ret;
	}

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	if (newval != val)
		return -ENOTSUPP;

	return 0;
}

static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable)
		ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
	else
		ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable) {
		ret = mtk_snand_config_feature(snf, 0,
					       SNAND_FEATURE_QUAD_ENABLE);
	} else {
		ret = mtk_snand_config_feature(snf,
					       SNAND_FEATURE_QUAD_ENABLE, 0);
	}

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_unlock(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
	if (ret) {
		snand_log_chip(snf->pdev, "Failed to set protection feature\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_write_enable(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_WRITE_ENABLE;
	int ret, val;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
	if (val < 0)
		return val;

	if (val & SNAND_STATUS_WEL)
		return 0;

	snand_log_chip(snf->pdev, "Failed to send write-enable command\n");

	return -ENOTSUPP;
}

static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	if (!snf->select_die)
		return 0;

	return snf->select_die(snf, dieidx);
}

static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
					     uint64_t addr)
{
	uint32_t dieidx;

	if (!snf->select_die)
		return addr;

	dieidx = addr >> snf->die_shift;

	mtk_snand_select_die(snf, dieidx);

	return addr & snf->die_mask;
}

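/*
 * Compute the plane-select column-address bit, presumably for two-plane
 * devices: pages in odd-numbered blocks (tested via the lowest block bit
 * of the page number) get the bit one position above the page-size
 * boundary set in the column address.
 */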
static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
					    uint32_t page)
{
	uint32_t pages_per_block;

	pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);

	if (page & pages_per_block)
		return 1 << (snf->writesize_shift + 1);

	return 0;
}

static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
{
	uint8_t op[4];

	op[0] = cmd;
	op[1] = (page >> 16) & 0xff;
	op[2] = (page >> 8) & 0xff;
	op[3] = page & 0xff;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

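/*
 * Collect per-sector FDM (user OOB) bytes from the NFI_FDM register
 * pairs: each sector exposes up to 8 bytes, the low word in FDML(n) and
 * the high word in FDMM(n).
 */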
static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
	uint32_t vall, valm;
	uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->nfi_soc->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->nfi_soc->fdm_size;
	}
}

static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
				     uint32_t sect, uint8_t *oob)
{
	uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint32_t coladdr, raw_offs, offs;
	uint8_t op[4];

	if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
		snand_log_snfi(snf->pdev,
			       "ECC parity size does not fit the GPRAM\n");
		return -ENOTSUPP;
	}

	raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
		   snf->nfi_soc->fdm_size;
	offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;

	/* Column address with plane bit */
	coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);

	op[0] = SNAND_CMD_READ_FROM_CACHE;
	op[1] = (coladdr >> 8) & 0xff;
	op[2] = coladdr & 0xff;
	op[3] = 0;

	return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
}

static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
{
	uint8_t *oob = snf->page_cache + snf->writesize;
	int i, rc, ret = 0, max_bitflips = 0;

	for (i = 0; i < snf->ecc_steps; i++) {
		if (snf->sect_bf[i] >= 0) {
			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];
			continue;
		}

		rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
		if (rc)
			return rc;

		rc = mtk_ecc_fixup_empty_sector(snf, i);
		if (rc < 0) {
			ret = -EBADMSG;

			snand_log_ecc(snf->pdev,
				      "Uncorrectable bitflips in page %u sect %u\n",
				      page, i);
		} else {
			snf->sect_bf[i] = rc;

			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];

			snand_log_ecc(snf->pdev,
				      "%u bitflip%s corrected in page %u sect %u\n",
				      rc, rc > 1 ? "s" : "", page, i);
		}
	}

	return ret ? ret : max_bitflips;
}

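/*
 * DMA one page from the chip's cache into page_cache using the SNFI
 * custom read sequence: program opcode/dummy cycles, column address and
 * I/O mode; configure the NFI for a custom DMA read (with the HW ECC
 * decoder and auto-format engine unless reading raw); trigger the dummy
 * read command and wait for CUS_READ_DONE; then, for non-raw reads,
 * collect the FDM bytes and ECC results. The cleanup path unwinds all
 * controller state regardless of the outcome.
 */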
static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
	uint32_t coladdr, rwbytes, mode, len;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Command and dummy cycles */
	nfi_write32(snf, SNF_RD_CTL2,
		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
		    (snf->opcode_rfc << DATA_READ_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_RD_CTL3, coladdr);

	/* Set read mode */
	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE, mode | DATARD_CUSTOM_EN);

	/* Set bytes to read */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI read prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA read */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map from device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_decoder_start(snf);

	/* Prepare for custom read interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	/* Start DMA read */
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for reading from cache\n");
		goto cleanup;
	}

	if (!raw) {
		ret = mtk_ecc_wait_decoder_done(snf);
		if (ret)
			goto cleanup;

		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

		mtk_ecc_check_decode_error(snf);
		mtk_snand_ecc_decoder_stop(snf);

		ret = mtk_snand_check_ecc_result(snf, page);
	}

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, false);

	/* Stop read */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;

	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			memcpy(eccptr, raw_sector, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

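/*
 * Core read path: issue READ_TO_CACHE, wait for the array read to
 * finish, transfer the cache, then undo the BBM swaps and reshape the
 * result into the caller's buf/oob layout. raw+format returns the
 * interleaved page flattened into data | FDM | ECC regions.
 */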
static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
				  void *buf, void *oob, bool raw, bool format)
{
	uint64_t die_addr;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
		return ret;
	}

	ret = mtk_snand_read_cache(snf, page, raw);
	if (ret < 0 && ret != -EBADMSG)
		return ret;

	if (raw) {
		if (format) {
			mtk_snand_bm_swap_raw(snf);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_from_raw_page(snf, buf, oob);
		} else {
			if (buf)
				memcpy(buf, snf->page_cache, snf->writesize);

			if (oob) {
				memset(oob, 0xff, snf->oobsize);
				memcpy(oob, snf->page_cache + snf->writesize,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		mtk_snand_bm_swap(snf);
		mtk_snand_fdm_bm_swap(snf);

		if (buf)
			memcpy(buf, snf->page_cache, snf->writesize);

		if (oob) {
			memset(oob, 0xff, snf->oobsize);
			memcpy(oob, snf->page_cache + snf->writesize,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}
	}

	return ret;
}

int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
			void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
	uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
	const uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
					<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
					<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

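/*
 * Write counterpart of mtk_snand_read_cache: DMA page_cache into the
 * chip's cache using the SNFI custom program-load sequence. With HW ECC
 * enabled, the FDM bytes are fed in through the NFI_FDM registers.
 */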
static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
				  bool raw)
{
	uint32_t coladdr, rwbytes, mode, len;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Write FDM registers if necessary */
	if (!raw)
		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

	/* Command */
	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_PG_CTL2, coladdr);

	/* Set write mode */
	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

	/* Set bytes to write */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI write prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA write */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map to device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_encoder_start(snf);

	/* Prepare for custom write interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	/* Start DMA write */
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for program load\n");
		goto cleanup;
	}

	if (!raw)
		mtk_snand_ecc_encoder_stop(snf);

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, true);

	/* Stop write */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_to_raw_page(struct mtk_snand *snf,
				  const void *buf, const void *oob,
				  bool empty_ecc)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	const uint8_t *bufptr = buf, *oobptr = oob;
	uint8_t *raw_sector;

	memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			if (empty_ecc)
				memset(raw_sector, 0xff, ecc_bytes);
			else
				memcpy(raw_sector, eccptr, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

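/*
 * Check whether the data and the ECC-protected FDM bytes are all 0xff.
 * Such a page is written raw with 0xff parity instead (see the caller),
 * presumably so it still reads back as an erased page.
 */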
static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
				    const void *oob)
{
	const uint8_t *p = buf;
	uint32_t i, j;

	if (buf) {
		for (i = 0; i < snf->writesize; i++) {
			if (p[i] != 0xff)
				return false;
		}
	}

	if (oob) {
		for (j = 0; j < snf->ecc_steps; j++) {
			p = oob + j * snf->nfi_soc->fdm_size;

			for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
				if (p[i] != 0xff)
					return false;
			}
		}
	}

	return true;
}

static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
				   const void *buf, const void *oob,
				   bool raw, bool format)
{
	uint64_t die_addr;
	bool empty_ecc = false;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
		/*
		 * If the data in the page to be ecc-ed is full 0xff,
		 * change to raw write mode
		 */
		raw = true;
		format = true;

		/* fill ecc parity code region with 0xff */
		empty_ecc = true;
	}

	if (raw) {
		if (format) {
			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_bm_swap_raw(snf);
		} else {
			memset(snf->page_cache, 0xff,
			       snf->writesize + snf->oobsize);

			if (buf)
				memcpy(snf->page_cache, buf, snf->writesize);

			if (oob) {
				memcpy(snf->page_cache + snf->writesize, oob,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
		if (buf)
			memcpy(snf->page_cache, buf, snf->writesize);

		if (oob) {
			memcpy(snf->page_cache + snf->writesize, oob,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}

		mtk_snand_fdm_bm_swap(snf);
		mtk_snand_bm_swap(snf);
	}

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_program_load(snf, page, raw);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Page program command timed out on page %u\n",
			       page);
		return ret;
	}

	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
		snand_log_chip(snf->pdev,
			       "Page program failed on page %u\n", page);
		return -EIO;
	}

	return 0;
}

int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
			 const void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
}

int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
	uint64_t die_addr;
	uint32_t page, block;
	int ret;

	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	die_addr = mtk_snand_select_die_address(snf, addr);
	block = die_addr >> snf->erasesize_shift;
	page = block << (snf->erasesize_shift - snf->writesize_shift);

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Block erase command timed out on block %u\n",
			       block);
		return ret;
	}

	if (ret & SNAND_STATUS_ERASE_FAIL) {
		snand_log_chip(snf->pdev,
			       "Block erase failed on block %u\n", block);
		return -EIO;
	}

	return 0;
}

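/*
 * Two bad-block marker schemes: on SoCs with bbm_swap, the standard
 * marker (the first on-flash spare byte, read from the unformatted raw
 * page) is used; otherwise the MTK scheme applies, which inspects the
 * first FDM byte of the formatted page and marks a block bad by
 * zero-filling the whole raw page.
 */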
static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     false);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     true);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_isbad_std(snf, addr);

	return mtk_snand_block_isbad_mtk(snf, addr);
}

static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
{
	/* Standard BBM position */
	memset(snf->buf_cache, 0xff, snf->oobsize);
	snf->buf_cache[0] = 0;

	return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
				       false);
}

static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	/* Write the whole page with zeros */
	memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);

	return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
				       snf->buf_cache + snf->writesize, true,
				       true);
}

int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_markbad_std(snf, addr);

	return mtk_snand_block_markbad_mtk(snf, addr);
}

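/*
 * Convert between a caller-visible OOB buffer and the per-sector FDM
 * layout. Byte 0 of each sector's FDM area is skipped since it is
 * reserved (the bad block marker lives there), leaving fdm_size - 1
 * usable bytes per sector. Both helpers return the number of bytes that
 * did not fit.
 */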
int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
		       const uint8_t *oobbuf, size_t ooblen)
{
	size_t len = ooblen, sect_fdm_len;
	const uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
			   size_t ooblen, const uint8_t *oobraw)
{
	size_t len = ooblen, sect_fdm_len;
	uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				 void *buf, void *oob, size_t ooblen,
				 size_t *actualooblen, bool raw)
{
	int ret, oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_read_page(snf, addr, buf, NULL, raw);

	ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
	if (ret && ret != -EBADMSG) {
		if (actualooblen)
			*actualooblen = 0;
		return ret;
	}

	oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return ret;
}

int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				  const void *buf, const void *oob,
				  size_t ooblen, size_t *actualooblen, bool raw)
{
	int oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_write_page(snf, addr, buf, NULL, raw);

	memset(snf->buf_cache, 0xff, snf->oobsize);
	oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
}

int mtk_snand_get_chip_info(struct mtk_snand *snf,
			    struct mtk_snand_chip_info *info)
{
	if (!snf || !info)
		return -EINVAL;

	info->model = snf->model;
	info->chipsize = snf->size;
	info->blocksize = snf->erasesize;
	info->pagesize = snf->writesize;
	info->sparesize = snf->oobsize;
	info->spare_per_sector = snf->spare_per_sector;
	info->fdm_size = snf->nfi_soc->fdm_size;
	info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
	info->num_sectors = snf->ecc_steps;
	info->sector_size = snf->nfi_soc->sector_size;
	info->ecc_strength = snf->ecc_strength;
	info->ecc_bytes = snf->ecc_bytes;

	return 0;
}

int mtk_snand_irq_process(struct mtk_snand *snf)
{
	uint32_t sta, ien;

	if (!snf)
		return -EINVAL;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return 0;

	nfi_write32(snf, NFI_INTR_EN, 0);
	irq_completion_done(snf->pdev);

	return 1;
}

static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
{
	uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
	int i, mul = 1;

	/*
	 * If we're using the 1KB sector size, HW will automatically
	 * double the spare size. So we should only use half of the value.
	 */
	if (snf->nfi_soc->sector_size == 1024)
		mul = 2;

	spare_per_step /= mul;

	for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
		if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
			snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
			snf->spare_per_sector *= mul;
			return i;
		}
	}

	snand_log_nfi(snf->pdev,
		      "Page size %u+%u is not supported\n", snf->writesize,
		      snf->oobsize);

	return -1;
}

static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
{
	uint32_t spare_size_shift, pagesize_idx, sector_size_512;
	int spare_size_idx;

	if (snf->nfi_soc->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (snf->writesize) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	/* spare_size_idx must be signed so the error return (-1) is caught */
	spare_size_idx = mtk_snand_select_spare_per_sector(snf);
	if (unlikely(spare_size_idx < 0))
		return -ENOTSUPP;

	snf->raw_sector_size = snf->nfi_soc->sector_size +
			       snf->spare_per_sector;

	/* Setup page format */
	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
		    (spare_size_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	return 0;
}

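/*
 * Select the best I/O mode supported by both the controller (snfi_caps)
 * and the flash (op_cap): fls() picks the highest common capability bit,
 * and higher bits correspond to wider I/O modes.
 */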
static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
						   uint32_t snfi_caps,
						   uint8_t *opcode,
						   uint8_t *dummy,
						   const struct snand_io_cap *op_cap)
{
	uint32_t i, caps;

	caps = snfi_caps & op_cap->caps;

	i = fls(caps);
	if (i > 0) {
		*opcode = op_cap->opcodes[i - 1].opcode;
		if (dummy)
			*dummy = op_cap->opcodes[i - 1].dummy;
		return i - 1;
	}

	return __SNAND_IO_MAX;
}

static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
				       uint32_t snfi_caps,
				       const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
		[SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
		[SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
		[SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
		[SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
				      &snf->dummy_rfc, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for read from cache\n");
		return -ENOTSUPP;
	}

	snf->mode_rfc = rfc_modes[idx];

	if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
				      const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t pl_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = 0,
		[SNAND_IO_1_1_4] = 1,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
				      NULL, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for program load\n");
		return -ENOTSUPP;
	}

	snf->mode_pl = pl_modes[idx];

	if (idx == SNAND_IO_1_1_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_setup(struct mtk_snand *snf,
			   const struct snand_flash_info *snand_info)
{
	const struct snand_mem_org *memorg = &snand_info->memorg;
	uint32_t i, msg_size, snfi_caps;
	int ret;

	/* Calculate flash memory organization */
	snf->model = snand_info->model;
	snf->writesize = memorg->pagesize;
	snf->oobsize = memorg->sparesize;
	snf->erasesize = snf->writesize * memorg->pages_per_block;
	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
	snf->size = snf->die_size * memorg->ndies;
	snf->num_dies = memorg->ndies;

	snf->writesize_mask = snf->writesize - 1;
	snf->erasesize_mask = snf->erasesize - 1;
	snf->die_mask = snf->die_size - 1;

	snf->writesize_shift = ffs(snf->writesize) - 1;
	snf->erasesize_shift = ffs(snf->erasesize) - 1;
	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

	snf->select_die = snand_info->select_die;

	/* Determine opcodes for read from cache/program load */
	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
	if (snf->snfi_quad_spi)
		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
	if (ret)
		return ret;

	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
	if (ret)
		return ret;

	/* ECC and page format */
	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	ret = mtk_snand_pagefmt_setup(snf);
	if (ret)
		return ret;

	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
			    msg_size);
	if (ret)
		return ret;

	nfi_write16(snf, NFI_CNFG, 0);

	/* Tuning options */
	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
	nfi_write32(snf, SNF_DLY_CTL3, (40 << SFCK_SAM_DLY_S));

	/* Interrupts */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Initialization on all dies */
	for (i = 0; i < snf->num_dies; i++) {
		mtk_snand_select_die(snf, i);

		/* Disable On-Die ECC engine */
		ret = mtk_snand_ondie_ecc_control(snf, false);
		if (ret)
			return ret;

		/* Disable block protection */
		mtk_snand_unlock(snf);

		/* Enable/disable quad-spi */
		mtk_snand_qspi_control(snf, snf->quad_spi_op);
	}

	mtk_snand_select_die(snf, 0);

	return 0;
}

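/*
 * Probe the JEDEC ID. Some chips return the ID immediately after the
 * opcode while others expect a dummy/address byte first, so both READID
 * layouts are tried against the flash ID table.
 */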
static int mtk_snand_id_probe(struct mtk_snand *snf,
			      const struct snand_flash_info **snand_info)
{
	uint8_t id[4], op[2];
	int ret;

	/* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
	op[0] = SNAND_CMD_READID;
	op[1] = 0;
	ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	/* Read SPI-NAND JEDEC ID, OP + ID */
	op[0] = SNAND_CMD_READID;
	ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	snand_log_chip(snf->pdev,
		       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
		       id[0], id[1], id[2], id[3]);

	return -EINVAL;
}

int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
		   struct mtk_snand **psnf)
{
	const struct snand_flash_info *snand_info;
	uint32_t rawpage_size, sect_bf_size;
	struct mtk_snand tmpsnf, *snf;
	int ret;

	if (!pdata || !psnf)
		return -EINVAL;

	if (pdata->soc >= __SNAND_SOC_MAX) {
		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
			       pdata->soc);
		return -EINVAL;
	}

	/* Dummy instance only for initial reset and id probe */
	tmpsnf.nfi_base = pdata->nfi_base;
	tmpsnf.ecc_base = pdata->ecc_base;
	tmpsnf.soc = pdata->soc;
	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
	tmpsnf.pdev = dev;

	/* Switch to SNFI mode */
	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

	/* Reset SNFI & NFI */
	mtk_snand_mac_reset(&tmpsnf);
	mtk_nfi_reset(&tmpsnf);

	/* Reset SPI-NAND chip */
	ret = mtk_snand_chip_reset(&tmpsnf);
	if (ret) {
		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
		return ret;
	}

	/* Probe SPI-NAND flash by JEDEC ID */
	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
	if (ret)
		return ret;

	rawpage_size = snand_info->memorg.pagesize +
		       snand_info->memorg.sparesize;

	sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
		       sizeof(*snf->sect_bf);

	/* Allocate memory for instance and cache */
	snf = generic_mem_alloc(dev,
				sizeof(*snf) + rawpage_size + sect_bf_size);
	if (!snf) {
		snand_log_chip(dev, "Failed to allocate memory for instance\n");
		return -ENOMEM;
	}

	snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
	snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);

	/* Allocate memory for DMA buffer */
	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
	if (!snf->page_cache) {
		generic_mem_free(dev, snf);
		snand_log_chip(dev,
			       "Failed to allocate memory for DMA buffer\n");
		return -ENOMEM;
	}

	/* Fill up instance */
	snf->pdev = dev;
	snf->nfi_base = pdata->nfi_base;
	snf->ecc_base = pdata->ecc_base;
	snf->soc = pdata->soc;
	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
	snf->snfi_quad_spi = pdata->quad_spi;

	/* Initialize SNFI & ECC engine */
	ret = mtk_snand_setup(snf, snand_info);
	if (ret) {
		dma_mem_free(dev, snf->page_cache);
		generic_mem_free(dev, snf);
		return ret;
	}

	*psnf = snf;

	return 0;
}

int mtk_snand_cleanup(struct mtk_snand *snf)
{
	if (!snf)
		return 0;

	dma_mem_free(snf->pdev, snf->page_cache);
	generic_mem_free(snf->pdev, snf);

	return 0;
}