// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"
9
/* NFI registers (an "_S" suffix denotes a bit-field shift) */
#define NFI_CNFG			0x000
#define CNFG_OP_MODE_S			12
#define CNFG_OP_MODE_CUST		6
#define CNFG_OP_MODE_PROGRAM		3
#define CNFG_AUTO_FMT_EN		BIT(9)
#define CNFG_HW_ECC_EN			BIT(8)
#define CNFG_DMA_BURST_EN		BIT(2)
#define CNFG_READ_MODE			BIT(1)
#define CNFG_DMA_MODE			BIT(0)

#define NFI_PAGEFMT			0x0004
#define NFI_SPARE_SIZE_LS_S		16
#define NFI_FDM_ECC_NUM_S		12
#define NFI_FDM_NUM_S			8
#define NFI_SPARE_SIZE_S		4
#define NFI_SEC_SEL_512			BIT(2)
#define NFI_PAGE_SIZE_S			0
#define NFI_PAGE_SIZE_512_2K		0
#define NFI_PAGE_SIZE_2K_4K		1
#define NFI_PAGE_SIZE_4K_8K		2
#define NFI_PAGE_SIZE_8K_16K		3

#define NFI_CON				0x008
#define CON_SEC_NUM_S			12
#define CON_BWR				BIT(9)
#define CON_BRD				BIT(8)
#define CON_NFI_RST			BIT(1)
#define CON_FIFO_FLUSH			BIT(0)

#define NFI_INTR_EN			0x010
#define NFI_INTR_STA			0x014
#define NFI_IRQ_INTR_EN			BIT(31)
#define NFI_IRQ_CUS_READ		BIT(8)
#define NFI_IRQ_CUS_PG			BIT(7)

#define NFI_CMD				0x020

#define NFI_STRDATA			0x040
#define STR_DATA			BIT(0)

#define NFI_STA				0x060
#define NFI_NAND_FSM			GENMASK(28, 24)
#define NFI_FSM				GENMASK(19, 16)
#define READ_EMPTY			BIT(12)

#define NFI_FIFOSTA			0x064
#define FIFO_WR_REMAIN_S		8
#define FIFO_RD_REMAIN_S		0

#define NFI_STRADDR			0x080

/* Per-sector FDM (OOB) registers: L holds bytes 0-3, M holds bytes 4-7 */
#define NFI_FDM0L			0x0a0
#define NFI_FDM0M			0x0a4
#define NFI_FDML(n)			(NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n)			(NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1			0x220
#define WBUF_EN				BIT(2)

#define NFI_MASTERSTA			0x224
#define MAS_ADDR			GENMASK(11, 9)
#define MAS_RD				GENMASK(8, 6)
#define MAS_WR				GENMASK(5, 3)
#define MAS_RDDLY			GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622		(MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define AHB_BUS_BUSY			BIT(1)
#define BUS_BUSY			BIT(0)
#define NFI_MASTERSTA_MASK_7986		(AHB_BUS_BUSY | BUS_BUSY)

/* SNFI registers */
#define SNF_MAC_CTL			0x500
#define MAC_XIO_SEL			BIT(4)
#define SF_MAC_EN			BIT(3)
#define SF_TRIG				BIT(2)
#define WIP_READY			BIT(1)
#define WIP				BIT(0)

#define SNF_MAC_OUTL			0x504
#define SNF_MAC_INL			0x508

#define SNF_RD_CTL2			0x510
#define DATA_READ_DUMMY_S		8
#define DATA_READ_CMD_S			0

#define SNF_RD_CTL3			0x514

#define SNF_PG_CTL1			0x524
#define PG_LOAD_CMD_S			8

#define SNF_PG_CTL2			0x528

#define SNF_MISC_CTL			0x538
#define SW_RST				BIT(28)
#define FIFO_RD_LTC_S			25
#define PG_LOAD_X4_EN			BIT(20)
#define DATA_READ_MODE_S		16
#define DATA_READ_MODE			GENMASK(18, 16)
#define DATA_READ_MODE_X1		0
#define DATA_READ_MODE_X2		1
#define DATA_READ_MODE_X4		2
#define DATA_READ_MODE_DUAL		5
#define DATA_READ_MODE_QUAD		6
#define PG_LOAD_CUSTOM_EN		BIT(7)
#define DATARD_CUSTOM_EN		BIT(6)
#define CS_DESELECT_CYC_S		0

#define SNF_MISC_CTL2			0x53c
#define PROGRAM_LOAD_BYTE_NUM_S		16
#define READ_DATA_BYTE_NUM_S		11

#define SNF_DLY_CTL3			0x548
#define SFCK_SAM_DLY_S			0

#define SNF_STA_CTL1			0x550
#define CUS_PG_DONE			BIT(28)
#define CUS_READ_DONE			BIT(27)
#define SPI_STATE_S			0
#define SPI_STATE			GENMASK(3, 0)

#define SNF_CFG				0x55c
#define SPI_MODE			BIT(0)

/* On-controller GPRAM used for MAC (direct SPI) transfers */
#define SNF_GPRAM			0x800
#define SNF_GPRAM_SIZE			0xa0

/* Timeout used by poll loops; passed as wait_us (microseconds) */
#define SNFI_POLL_INTERVAL		1000000
137
/* Spare-area sizes (bytes per sector) selectable on each SoC generation */
static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const uint8_t mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

/* Per-SoC controller capabilities and quirks */
static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
	[SNAND_SOC_MT7622] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = false,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7629] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = true,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7986] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
	},
};
183
/* Read a 32-bit NFI/SNFI register at byte offset @reg */
static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->nfi_base + reg);
}
188
/* Write a 32-bit NFI/SNFI register at byte offset @reg */
static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->nfi_base + reg);
}
194
/* 16-bit write to an NFI register at byte offset @reg */
static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->nfi_base + reg);
}
200
201static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
202 uint32_t set)
203{
204 uint32_t val;
205
206 val = readl(snf->nfi_base + reg);
207 val &= ~clr;
208 val |= set;
209 writel(val, snf->nfi_base + reg);
210}
211
/*
 * Copy @len bytes from @data into NFI register space starting at @reg,
 * packing them little-endian into 32-bit register writes.  A word is
 * flushed once its last byte lane is filled, or at the end of the
 * buffer (a trailing partial word is written with its unused upper
 * lanes still zero).  @reg is expected to be 4-byte aligned.
 */
static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		/* Place the next byte into its lane of the current word */
		val |= ((uint32_t)*data++) << (8 * (i % es));

		/* Flush on the last lane of a word or at end of data */
		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}
226
/*
 * Copy @len bytes from NFI register space starting at @reg into @data,
 * reading one aligned 32-bit word at a time and extracting the byte
 * lanes little-endian.  @reg is expected to be 4-byte aligned.
 */
static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		/* Fetch a fresh word at the start and on each word boundary */
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}
239
240static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
241{
242 uint8_t tmp = *bm1;
243 *bm1 = *bm2;
244 *bm2 = tmp;
245}
246
247static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
248{
249 uint32_t fdm_bbm_pos;
250
251 if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
252 return;
253
254 fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
255 snf->nfi_soc->sector_size;
256 do_bm_swap(&snf->page_cache[fdm_bbm_pos],
257 &snf->page_cache[snf->writesize]);
258}
259
260static void mtk_snand_bm_swap(struct mtk_snand *snf)
261{
262 uint32_t buf_bbm_pos, fdm_bbm_pos;
263
264 if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
265 return;
266
267 buf_bbm_pos = snf->writesize -
268 (snf->ecc_steps - 1) * snf->spare_per_sector;
269 fdm_bbm_pos = snf->writesize +
270 (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
271 do_bm_swap(&snf->page_cache[fdm_bbm_pos],
272 &snf->page_cache[buf_bbm_pos]);
273}
274
275static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
276{
277 uint32_t fdm_bbm_pos1, fdm_bbm_pos2;
278
279 if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
280 return;
281
282 fdm_bbm_pos1 = snf->nfi_soc->sector_size;
283 fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
284 snf->nfi_soc->sector_size;
285 do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
286 &snf->page_cache[fdm_bbm_pos2]);
287}
288
289static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
290{
291 uint32_t fdm_bbm_pos1, fdm_bbm_pos2;
292
293 if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
294 return;
295
296 fdm_bbm_pos1 = snf->writesize;
297 fdm_bbm_pos2 = snf->writesize +
298 (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
299 do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
300 &snf->page_cache[fdm_bbm_pos2]);
301}
302
/*
 * Reset the NFI controller and verify its bus master, state machines
 * and FIFOs have returned to idle.  Returns 0 on success or a negative
 * error code on timeout.
 */
static int mtk_nfi_reset(struct mtk_snand *snf)
{
	uint32_t val, fifo_mask;
	int ret;

	/* Flush the FIFO and reset the NFI state machine */
	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	/* Wait until the bus master is idle (mask is SoC-specific) */
	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "NFI master is still busy after reset\n");
		return ret;
	}

	/* Wait for the NFI and NAND-interface FSMs to return to idle */
	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
		return ret;
	}

	/* Both read and write FIFOs must report zero remaining entries */
	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}
338
/*
 * Soft-reset the SNFI MAC, wait for its SPI state machine to go idle,
 * then re-program the FIFO read latency and CS deselect cycle count.
 * Returns 0 on success or the poll-timeout error.
 */
static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	uint32_t val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

	/* 2-cycle FIFO read latency, 10 CS deselect cycles */
	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
		    (10 << CS_DESELECT_CYC_S));

	return ret;
}
356
/*
 * Run one SNFI MAC (direct SPI) transaction: @outlen bytes are shifted
 * out of the GPRAM, then @inlen bytes are captured back into it.  The
 * MAC is always disabled again before returning.
 */
static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
				 uint32_t inlen)
{
	int ret;
	uint32_t val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	/* Kick off the transfer */
	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	/* First wait for the transfer to be accepted (WIP_READY set)... */
	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	/* ...then for it to complete (WIP cleared) */
	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev,
			       "Timed out waiting for WIP cleared\n");
	}

cleanup:
	/* Always disable the MAC again */
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}
388
389int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
390 uint8_t *in, uint32_t inlen)
391{
392 int ret;
393
394 if (outlen + inlen > SNF_GPRAM_SIZE)
395 return -EINVAL;
396
397 mtk_snand_mac_reset(snf);
398
399 nfi_write_data(snf, SNF_GPRAM, out, outlen);
400
401 ret = mtk_snand_mac_trigger(snf, outlen, inlen);
402 if (ret)
403 return ret;
404
405 if (!inlen)
406 return 0;
407
408 nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);
409
410 return 0;
411}
412
413static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
414{
415 uint8_t op[2], val;
416 int ret;
417
418 op[0] = SNAND_CMD_GET_FEATURE;
419 op[1] = (uint8_t)addr;
420
421 ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
422 if (ret)
423 return ret;
424
425 return val;
426}
427
428int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
429{
430 uint8_t op[3];
431
432 op[0] = SNAND_CMD_SET_FEATURE;
433 op[1] = (uint8_t)addr;
434 op[2] = (uint8_t)val;
435
436 return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
437}
438
439static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
440{
441 int val;
442 mtk_snand_time_t time_start, tmo;
443
444 time_start = timer_get_ticks();
445 tmo = timer_time_to_tick(wait_us);
446
447 do {
448 val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
449 if (!(val & SNAND_STATUS_OIP))
450 return val & (SNAND_STATUS_ERASE_FAIL |
451 SNAND_STATUS_PROGRAM_FAIL);
452 } while (!timer_is_timeout(time_start, tmo));
453
454 return -ETIMEDOUT;
455}
456
457int mtk_snand_chip_reset(struct mtk_snand *snf)
458{
459 uint8_t op = SNAND_CMD_RESET;
460 int ret;
461
462 ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
463 if (ret)
464 return ret;
465
466 ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
467 if (ret < 0)
468 return ret;
469
470 return 0;
471}
472
473static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
474 uint8_t set)
475{
476 int val, newval;
477 int ret;
478
479 val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
480 if (val < 0) {
481 snand_log_chip(snf->pdev,
482 "Failed to get configuration feature\n");
483 return val;
484 }
485
486 newval = (val & (~clr)) | set;
487
488 if (newval == val)
489 return 0;
490
491 ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
492 (uint8_t)newval);
493 if (val < 0) {
494 snand_log_chip(snf->pdev,
495 "Failed to set configuration feature\n");
496 return ret;
497 }
498
499 val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
500 if (val < 0) {
501 snand_log_chip(snf->pdev,
502 "Failed to get configuration feature\n");
503 return val;
504 }
505
506 if (newval != val)
507 return -ENOTSUPP;
508
509 return 0;
510}
511
512static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
513{
514 int ret;
515
516 if (enable)
517 ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
518 else
519 ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);
520
521 if (ret) {
522 snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
523 enable ? "enable" : "disable");
524 }
525
526 return ret;
527}
528
529static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
530{
531 int ret;
532
533 if (enable) {
534 ret = mtk_snand_config_feature(snf, 0,
535 SNAND_FEATURE_QUAD_ENABLE);
536 } else {
537 ret = mtk_snand_config_feature(snf,
538 SNAND_FEATURE_QUAD_ENABLE, 0);
539 }
540
541 if (ret) {
542 snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
543 enable ? "enable" : "disable");
544 }
545
546 return ret;
547}
548
549static int mtk_snand_unlock(struct mtk_snand *snf)
550{
551 int ret;
552
553 ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
554 if (ret) {
555 snand_log_chip(snf->pdev, "Failed to set protection feature\n");
556 return ret;
557 }
558
559 return 0;
560}
561
562static int mtk_snand_write_enable(struct mtk_snand *snf)
563{
564 uint8_t op = SNAND_CMD_WRITE_ENABLE;
565 int ret, val;
566
567 ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
568 if (ret)
569 return ret;
570
571 val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
572 if (val < 0)
573 return ret;
574
575 if (val & SNAND_STATUS_WEL)
576 return 0;
577
578 snand_log_chip(snf->pdev, "Failed to send write-enable command\n");
579
580 return -ENOTSUPP;
581}
582
583static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
584{
585 if (!snf->select_die)
586 return 0;
587
588 return snf->select_die(snf, dieidx);
589}
590
591static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
592 uint64_t addr)
593{
594 uint32_t dieidx;
595
596 if (!snf->select_die)
597 return addr;
598
599 dieidx = addr >> snf->die_shift;
600
601 mtk_snand_select_die(snf, dieidx);
602
603 return addr & snf->die_mask;
604}
605
606static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
607 uint32_t page)
608{
609 uint32_t pages_per_block;
610
611 pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);
612
613 if (page & pages_per_block)
614 return 1 << (snf->writesize_shift + 1);
615
616 return 0;
617}
618
619static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
620{
621 uint8_t op[4];
622
623 op[0] = cmd;
624 op[1] = (page >> 16) & 0xff;
625 op[2] = (page >> 8) & 0xff;
626 op[3] = page & 0xff;
627
628 return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
629}
630
/*
 * Copy the per-sector FDM (OOB/spare) bytes out of the NFI_FDMxL/M
 * register pairs into @buf.  Each sector has up to 8 FDM bytes: the
 * low register holds bytes 0-3 and the high register bytes 4-7, both
 * little-endian.
 */
static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
	uint32_t vall, valm;
	uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		/* Byte j comes from lane j%4 of the low or high word */
		for (j = 0; j < snf->nfi_soc->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->nfi_soc->fdm_size;
	}
}
647
/*
 * Read the raw ECC parity bytes of sector @sect of @page from the
 * chip's cache via a MAC transaction, storing them at their position
 * inside the formatted OOB buffer @oob (after all sectors' FDM data).
 * Used to re-check sectors the HW ECC engine flagged as uncorrectable.
 */
static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
				     uint32_t sect, uint8_t *oob)
{
	uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint32_t coladdr, raw_offs, offs;
	uint8_t op[4];

	if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
		snand_log_snfi(snf->pdev,
			       "ECC parity size does not fit the GPRAM\n");
		return -ENOTSUPP;
	}

	/* On-flash offset of this sector's parity area (raw layout) */
	raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
		   snf->nfi_soc->fdm_size;
	/* Offset of this sector's parity inside the formatted OOB buffer */
	offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;

	/* Column address with plane bit */
	coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);

	op[0] = SNAND_CMD_READ_FROM_CACHE;
	op[1] = (coladdr >> 8) & 0xff;
	op[2] = coladdr & 0xff;
	op[3] = 0;

	return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
}
675
/*
 * Post-process the HW ECC result of a page read.  Sectors with
 * sect_bf[i] >= 0 were corrected (value = bitflip count); sectors with
 * a negative count were reported uncorrectable, so their raw parity is
 * re-read and mtk_ecc_fixup_empty_sector() re-evaluates them (an
 * almost-erased sector is repaired to 0xff).
 *
 * Returns the maximum per-sector bitflip count, or -EBADMSG if any
 * sector is genuinely uncorrectable.
 */
static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
{
	uint8_t *oob = snf->page_cache + snf->writesize;
	int i, rc, ret = 0, max_bitflips = 0;

	for (i = 0; i < snf->ecc_steps; i++) {
		if (snf->sect_bf[i] >= 0) {
			/* Already corrected by the ECC engine */
			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];
			continue;
		}

		/* Fetch raw parity so the sector can be re-evaluated */
		rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
		if (rc)
			return rc;

		rc = mtk_ecc_fixup_empty_sector(snf, i);
		if (rc < 0) {
			ret = -EBADMSG;

			snand_log_ecc(snf->pdev,
			      "Uncorrectable bitflips in page %u sect %u\n",
			      page, i);
		} else {
			snf->sect_bf[i] = rc;

			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];

			snand_log_ecc(snf->pdev,
			      "%u bitflip%s corrected in page %u sect %u\n",
			      rc, rc > 1 ? "s" : "", page, i);
		}
	}

	return ret ? ret : max_bitflips;
}
713
/*
 * DMA the page held in the chip's cache register into snf->page_cache
 * through the SNFI custom-read datapath.
 *
 * With @raw false the HW ECC decoder runs, the FDM (OOB) bytes are
 * harvested from the NFI FDM registers, and the result of
 * mtk_snand_check_ecc_result() (-EBADMSG or max bitflips) is returned.
 * With @raw true the page is transferred unmodified.
 */
static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
	uint32_t coladdr, rwbytes, mode, len;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Command and dummy cycles */
	nfi_write32(snf, SNF_RD_CTL2,
		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
		    (snf->opcode_rfc << DATA_READ_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_RD_CTL3, coladdr);

	/* Set read mode */
	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE, mode | DATARD_CUSTOM_EN);

	/* Set bytes to read */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI read prepare: ECC and auto-format only in non-raw mode */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA read */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map from device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_decoder_start(snf);

	/* Prepare for custom read interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	/* Start DMA read */
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for reading from cache\n");
		goto cleanup;
	}

	if (!raw) {
		ret = mtk_ecc_wait_decoder_done(snf);
		if (ret)
			goto cleanup;

		/* Harvest the per-sector OOB bytes from the FDM registers */
		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

		mtk_ecc_check_decode_error(snf);
		mtk_snand_ecc_decoder_stop(snf);

		/* Re-check sectors flagged uncorrectable (may be empty) */
		ret = mtk_snand_check_ecc_result(snf, page);
	}

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, false);

	/* Stop read */
	nfi_write32(snf, NFI_CON, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt (read the status register to ack, then mask) */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Leave custom-read mode */
	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);

	return ret;
}
816
817static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
818{
819 uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
820 uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
821 uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;
822
823 for (i = 0; i < snf->ecc_steps; i++) {
824 raw_sector = snf->page_cache + i * snf->raw_sector_size;
825
826 if (buf) {
827 memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
828 bufptr += snf->nfi_soc->sector_size;
829 }
830
831 raw_sector += snf->nfi_soc->sector_size;
832
833 if (oob) {
834 memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
835 oobptr += snf->nfi_soc->fdm_size;
836 raw_sector += snf->nfi_soc->fdm_size;
837
838 memcpy(eccptr, raw_sector, ecc_bytes);
839 eccptr += ecc_bytes;
840 }
841 }
842}
843
/*
 * Full page-read sequence: select the die, issue READ-TO-CACHE, wait
 * for the array read, then transfer the cache to memory.
 *
 * @raw + @format: raw layout re-ordered into data + FDM + parity.
 * @raw only: raw per-sector layout copied out as-is.
 * non-raw: ECC-corrected data with BBM positions swapped back.
 *
 * Returns mtk_snand_read_cache()'s result: 0/max bitflips on success,
 * -EBADMSG on uncorrectable data (buffers are still filled), or
 * another negative error code.
 */
static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
				  void *buf, void *oob, bool raw, bool format)
{
	uint64_t die_addr;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
	if (ret)
		return ret;

	/* Wait for the chip to load the page into its cache register */
	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
		return ret;
	}

	ret = mtk_snand_read_cache(snf, page, raw);
	if (ret < 0 && ret != -EBADMSG)
		return ret;

	if (raw) {
		if (format) {
			mtk_snand_bm_swap_raw(snf);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_from_raw_page(snf, buf, oob);
		} else {
			if (buf)
				memcpy(buf, snf->page_cache, snf->writesize);

			if (oob) {
				memset(oob, 0xff, snf->oobsize);
				memcpy(oob, snf->page_cache + snf->writesize,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		/* Undo the controller's BBM swapping before copying out */
		mtk_snand_bm_swap(snf);
		mtk_snand_fdm_bm_swap(snf);

		if (buf)
			memcpy(buf, snf->page_cache, snf->writesize);

		if (oob) {
			memset(oob, 0xff, snf->oobsize);
			memcpy(oob, snf->page_cache + snf->writesize,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}
	}

	return ret;
}
899
900int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
901 void *oob, bool raw)
902{
903 if (!snf || (!buf && !oob))
904 return -EINVAL;
905
906 if (addr >= snf->size)
907 return -EINVAL;
908
909 return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
910}
911
/*
 * Load the per-sector FDM (OOB) bytes from @buf into the NFI_FDMxL/M
 * register pairs before an ECC page program.  Each sector has an
 * 8-byte window: the low register carries bytes 0-3 and the high
 * register bytes 4-7, little-endian; lanes beyond fdm_size are padded
 * with 0xff (erased value).
 */
static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
	uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
	const uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
					<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
					<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}
937
/*
 * DMA snf->page_cache into the chip's cache register through the SNFI
 * custom program-load datapath.  With @raw false the HW ECC encoder
 * generates parity on the fly and the FDM registers (loaded here from
 * the OOB part of page_cache) supply the spare bytes.
 */
static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
				  bool raw)
{
	uint32_t coladdr, rwbytes, mode, len;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Write FDM registers if necessary */
	if (!raw)
		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

	/* Command */
	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_PG_CTL2, coladdr);

	/* Set write mode (x4 when mode_pl is set, else x1) */
	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

	/* Set bytes to write */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI write prepare: ECC and auto-format only in non-raw mode */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA write */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map to device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_encoder_start(snf);

	/* Prepare for custom write interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	/* Start DMA write */
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for program load\n");
		goto cleanup;
	}

	if (!raw)
		mtk_snand_ecc_encoder_stop(snf);

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, true);

	/*
	 * Stop write.
	 * NOTE(review): the read path clears NFI_CON with nfi_write32();
	 * the bits used here fit in 16 bits, but confirm the intended
	 * register access width.
	 */
	nfi_write16(snf, NFI_CON, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt (read the status register to ack, then mask) */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Leave custom program-load mode */
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}
1033
1034static void mtk_snand_to_raw_page(struct mtk_snand *snf,
1035 const void *buf, const void *oob,
1036 bool empty_ecc)
1037{
1038 uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
1039 const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
1040 const uint8_t *bufptr = buf, *oobptr = oob;
1041 uint8_t *raw_sector;
1042
1043 memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
1044 for (i = 0; i < snf->ecc_steps; i++) {
1045 raw_sector = snf->page_cache + i * snf->raw_sector_size;
1046
1047 if (buf) {
1048 memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
1049 bufptr += snf->nfi_soc->sector_size;
1050 }
1051
1052 raw_sector += snf->nfi_soc->sector_size;
1053
1054 if (oob) {
1055 memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
1056 oobptr += snf->nfi_soc->fdm_size;
1057 raw_sector += snf->nfi_soc->fdm_size;
1058
1059 if (empty_ecc)
1060 memset(raw_sector, 0xff, ecc_bytes);
1061 else
1062 memcpy(raw_sector, eccptr, ecc_bytes);
1063 eccptr += ecc_bytes;
1064 }
1065 }
1066}
1067
1068static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
1069 const void *oob)
1070{
1071 const uint8_t *p = buf;
1072 uint32_t i, j;
1073
1074 if (buf) {
1075 for (i = 0; i < snf->writesize; i++) {
1076 if (p[i] != 0xff)
1077 return false;
1078 }
1079 }
1080
1081 if (oob) {
1082 for (j = 0; j < snf->ecc_steps; j++) {
1083 p = oob + j * snf->nfi_soc->fdm_size;
1084
1085 for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
1086 if (p[i] != 0xff)
1087 return false;
1088 }
1089 }
1090 }
1091
1092 return true;
1093}
1094
/*
 * Full page-program sequence.  A page whose data and ECC-protected FDM
 * bytes are all 0xff is switched to raw mode with erased parity so no
 * ECC code is programmed for an "empty" page.  BBM swapping mirrors
 * the read path.  Returns 0 on success, -EIO on a reported program
 * failure, or another negative error code.
 */
static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
				   const void *buf, const void *oob,
				   bool raw, bool format)
{
	uint64_t die_addr;
	bool empty_ecc = false;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
		/*
		 * If the data in the page to be ecc-ed is full 0xff,
		 * change to raw write mode
		 */
		raw = true;
		format = true;

		/* fill ecc parity code region with 0xff */
		empty_ecc = true;
	}

	if (raw) {
		if (format) {
			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_bm_swap_raw(snf);
		} else {
			memset(snf->page_cache, 0xff,
			       snf->writesize + snf->oobsize);

			if (buf)
				memcpy(snf->page_cache, buf, snf->writesize);

			if (oob) {
				memcpy(snf->page_cache + snf->writesize, oob,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
		if (buf)
			memcpy(snf->page_cache, buf, snf->writesize);

		if (oob) {
			memcpy(snf->page_cache + snf->writesize, oob,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}

		/* Swap BBM bytes into the layout the controller expects */
		mtk_snand_fdm_bm_swap(snf);
		mtk_snand_bm_swap(snf);
	}

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_program_load(snf, page, raw);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
	if (ret)
		return ret;

	/* Poll OIP; on success ret holds the fail-status bits */
	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Page program command timed out on page %u\n",
			       page);
		return ret;
	}

	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
		snand_log_chip(snf->pdev,
			       "Page program failed on page %u\n", page);
		return -EIO;
	}

	return 0;
}
1178
1179int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
1180 const void *oob, bool raw)
1181{
1182 if (!snf || (!buf && !oob))
1183 return -EINVAL;
1184
1185 if (addr >= snf->size)
1186 return -EINVAL;
1187
1188 return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
1189}
1190
/*
 * Erase the block containing @addr.  Returns 0 on success, -EINVAL on
 * bad arguments, -EIO if the chip reports an erase failure, or another
 * negative error code on communication/timeout errors.
 */
int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
	uint64_t die_addr;
	uint32_t page, block;
	int ret;

	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	die_addr = mtk_snand_select_die_address(snf, addr);
	block = die_addr >> snf->erasesize_shift;
	/* Row address of the block's first page */
	page = block << (snf->erasesize_shift - snf->writesize_shift);

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
	if (ret)
		return ret;

	/* Poll OIP; on success ret holds the fail-status bits */
	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Block erase command timed out on block %u\n",
			       block);
		return ret;
	}

	if (ret & SNAND_STATUS_ERASE_FAIL) {
		snand_log_chip(snf->pdev,
			       "Block erase failed on block %u\n", block);
		return -EIO;
	}

	return 0;
}
1231
1232static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
1233{
1234 int ret;
1235
1236 ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
1237 false);
1238 if (ret && ret != -EBADMSG)
1239 return ret;
1240
1241 return snf->buf_cache[0] != 0xff;
1242}
1243
1244static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
1245{
1246 int ret;
1247
1248 ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
1249 true);
1250 if (ret && ret != -EBADMSG)
1251 return ret;
1252
1253 return snf->buf_cache[0] != 0xff;
1254}
1255
1256int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
1257{
1258 if (!snf)
1259 return -EINVAL;
1260
1261 if (addr >= snf->size)
1262 return -EINVAL;
1263
1264 addr &= ~snf->erasesize_mask;
1265
1266 if (snf->nfi_soc->bbm_swap)
1267 return mtk_snand_block_isbad_std(snf, addr);
1268
1269 return mtk_snand_block_isbad_mtk(snf, addr);
1270}
1271
1272static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
1273{
1274 /* Standard BBM position */
1275 memset(snf->buf_cache, 0xff, snf->oobsize);
1276 snf->buf_cache[0] = 0;
1277
1278 return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
1279 false);
1280}
1281
1282static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
1283{
1284 /* Write the whole page with zeros */
1285 memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);
1286
1287 return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
1288 snf->buf_cache + snf->writesize, true,
1289 true);
1290}
1291
1292int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
1293{
1294 if (!snf)
1295 return -EINVAL;
1296
1297 if (addr >= snf->size)
1298 return -EINVAL;
1299
1300 addr &= ~snf->erasesize_mask;
1301
1302 if (snf->nfi_soc->bbm_swap)
1303 return mtk_snand_block_markbad_std(snf, addr);
1304
1305 return mtk_snand_block_markbad_mtk(snf, addr);
1306}
1307
1308int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
1309 const uint8_t *oobbuf, size_t ooblen)
1310{
1311 size_t len = ooblen, sect_fdm_len;
1312 const uint8_t *oob = oobbuf;
1313 uint32_t step = 0;
1314
1315 if (!snf || !oobraw || !oob)
1316 return -EINVAL;
1317
1318 while (len && step < snf->ecc_steps) {
1319 sect_fdm_len = snf->nfi_soc->fdm_size - 1;
1320 if (sect_fdm_len > len)
1321 sect_fdm_len = len;
1322
1323 memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
1324 sect_fdm_len);
1325
1326 len -= sect_fdm_len;
1327 oob += sect_fdm_len;
1328 step++;
1329 }
1330
1331 return len;
1332}
1333
1334int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
1335 size_t ooblen, const uint8_t *oobraw)
1336{
1337 size_t len = ooblen, sect_fdm_len;
1338 uint8_t *oob = oobbuf;
1339 uint32_t step = 0;
1340
1341 if (!snf || !oobraw || !oob)
1342 return -EINVAL;
1343
1344 while (len && step < snf->ecc_steps) {
1345 sect_fdm_len = snf->nfi_soc->fdm_size - 1;
1346 if (sect_fdm_len > len)
1347 sect_fdm_len = len;
1348
1349 memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
1350 sect_fdm_len);
1351
1352 len -= sect_fdm_len;
1353 oob += sect_fdm_len;
1354 step++;
1355 }
1356
1357 return len;
1358}
1359
1360int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
1361 void *buf, void *oob, size_t ooblen,
1362 size_t *actualooblen, bool raw)
1363{
1364 int ret, oobremain;
1365
1366 if (!snf)
1367 return -EINVAL;
1368
1369 if (!oob)
1370 return mtk_snand_read_page(snf, addr, buf, NULL, raw);
1371
1372 ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
1373 if (ret && ret != -EBADMSG) {
1374 if (actualooblen)
1375 *actualooblen = 0;
1376 return ret;
1377 }
1378
1379 oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
1380 if (actualooblen)
1381 *actualooblen = ooblen - oobremain;
1382
1383 return ret;
1384}
1385
1386int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
1387 const void *buf, const void *oob,
1388 size_t ooblen, size_t *actualooblen, bool raw)
1389{
1390 int oobremain;
1391
1392 if (!snf)
1393 return -EINVAL;
1394
1395 if (!oob)
1396 return mtk_snand_write_page(snf, addr, buf, NULL, raw);
1397
1398 memset(snf->buf_cache, 0xff, snf->oobsize);
1399 oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
1400 if (actualooblen)
1401 *actualooblen = ooblen - oobremain;
1402
1403 return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
1404}
1405
1406int mtk_snand_get_chip_info(struct mtk_snand *snf,
1407 struct mtk_snand_chip_info *info)
1408{
1409 if (!snf || !info)
1410 return -EINVAL;
1411
1412 info->model = snf->model;
1413 info->chipsize = snf->size;
1414 info->blocksize = snf->erasesize;
1415 info->pagesize = snf->writesize;
1416 info->sparesize = snf->oobsize;
1417 info->spare_per_sector = snf->spare_per_sector;
1418 info->fdm_size = snf->nfi_soc->fdm_size;
1419 info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
1420 info->num_sectors = snf->ecc_steps;
1421 info->sector_size = snf->nfi_soc->sector_size;
1422 info->ecc_strength = snf->ecc_strength;
1423 info->ecc_bytes = snf->ecc_bytes;
1424
1425 return 0;
1426}
1427
1428int mtk_snand_irq_process(struct mtk_snand *snf)
1429{
1430 uint32_t sta, ien;
1431
1432 if (!snf)
1433 return -EINVAL;
1434
1435 sta = nfi_read32(snf, NFI_INTR_STA);
1436 ien = nfi_read32(snf, NFI_INTR_EN);
1437
1438 if (!(sta & ien))
1439 return 0;
1440
1441 nfi_write32(snf, NFI_INTR_EN, 0);
1442 irq_completion_done(snf->pdev);
1443
1444 return 1;
1445}
1446
1447static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
1448{
1449 uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
1450 int i, mul = 1;
1451
1452 /*
1453 * If we're using the 1KB sector size, HW will automatically
1454 * double the spare size. So we should only use half of the value.
1455 */
1456 if (snf->nfi_soc->sector_size == 1024)
1457 mul = 2;
1458
1459 spare_per_step /= mul;
1460
1461 for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
1462 if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
1463 snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
1464 snf->spare_per_sector *= mul;
1465 return i;
1466 }
1467 }
1468
1469 snand_log_nfi(snf->pdev,
1470 "Page size %u+%u is not supported\n", snf->writesize,
1471 snf->oobsize);
1472
1473 return -1;
1474}
1475
1476static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
1477{
1478 uint32_t spare_size_idx, spare_size_shift, pagesize_idx;
1479 uint32_t sector_size_512;
1480
1481 if (snf->nfi_soc->sector_size == 512) {
1482 sector_size_512 = NFI_SEC_SEL_512;
1483 spare_size_shift = NFI_SPARE_SIZE_S;
1484 } else {
1485 sector_size_512 = 0;
1486 spare_size_shift = NFI_SPARE_SIZE_LS_S;
1487 }
1488
1489 switch (snf->writesize) {
1490 case SZ_512:
1491 pagesize_idx = NFI_PAGE_SIZE_512_2K;
1492 break;
1493 case SZ_2K:
1494 if (snf->nfi_soc->sector_size == 512)
1495 pagesize_idx = NFI_PAGE_SIZE_2K_4K;
1496 else
1497 pagesize_idx = NFI_PAGE_SIZE_512_2K;
1498 break;
1499 case SZ_4K:
1500 if (snf->nfi_soc->sector_size == 512)
1501 pagesize_idx = NFI_PAGE_SIZE_4K_8K;
1502 else
1503 pagesize_idx = NFI_PAGE_SIZE_2K_4K;
1504 break;
1505 case SZ_8K:
1506 if (snf->nfi_soc->sector_size == 512)
1507 pagesize_idx = NFI_PAGE_SIZE_8K_16K;
1508 else
1509 pagesize_idx = NFI_PAGE_SIZE_4K_8K;
1510 break;
1511 case SZ_16K:
1512 pagesize_idx = NFI_PAGE_SIZE_8K_16K;
1513 break;
1514 default:
1515 snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
1516 snf->writesize);
1517 return -ENOTSUPP;
1518 }
1519
1520 spare_size_idx = mtk_snand_select_spare_per_sector(snf);
1521 if (unlikely(spare_size_idx < 0))
1522 return -ENOTSUPP;
1523
1524 snf->raw_sector_size = snf->nfi_soc->sector_size +
1525 snf->spare_per_sector;
1526
1527 /* Setup page format */
1528 nfi_write32(snf, NFI_PAGEFMT,
1529 (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
1530 (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
1531 (spare_size_idx << spare_size_shift) |
1532 (pagesize_idx << NFI_PAGE_SIZE_S) |
1533 sector_size_512);
1534
1535 return 0;
1536}
1537
1538static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
1539 uint32_t snfi_caps, uint8_t *opcode,
1540 uint8_t *dummy,
1541 const struct snand_io_cap *op_cap)
1542{
1543 uint32_t i, caps;
1544
1545 caps = snfi_caps & op_cap->caps;
1546
1547 i = fls(caps);
1548 if (i > 0) {
1549 *opcode = op_cap->opcodes[i - 1].opcode;
1550 if (dummy)
1551 *dummy = op_cap->opcodes[i - 1].dummy;
1552 return i - 1;
1553 }
1554
1555 return __SNAND_IO_MAX;
1556}
1557
1558static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
1559 uint32_t snfi_caps,
1560 const struct snand_io_cap *op_cap)
1561{
1562 enum snand_flash_io idx;
1563
1564 static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
1565 [SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
1566 [SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
1567 [SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
1568 [SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
1569 [SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
1570 };
1571
1572 idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
1573 &snf->dummy_rfc, op_cap);
1574 if (idx >= __SNAND_IO_MAX) {
1575 snand_log_snfi(snf->pdev,
1576 "No capable opcode for read from cache\n");
1577 return -ENOTSUPP;
1578 }
1579
1580 snf->mode_rfc = rfc_modes[idx];
1581
1582 if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
1583 snf->quad_spi_op = true;
1584
1585 return 0;
1586}
1587
1588static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
1589 const struct snand_io_cap *op_cap)
1590{
1591 enum snand_flash_io idx;
1592
1593 static const uint8_t pl_modes[__SNAND_IO_MAX] = {
1594 [SNAND_IO_1_1_1] = 0,
1595 [SNAND_IO_1_1_4] = 1,
1596 };
1597
1598 idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
1599 NULL, op_cap);
1600 if (idx >= __SNAND_IO_MAX) {
1601 snand_log_snfi(snf->pdev,
1602 "No capable opcode for program load\n");
1603 return -ENOTSUPP;
1604 }
1605
1606 snf->mode_pl = pl_modes[idx];
1607
1608 if (idx == SNAND_IO_1_1_4)
1609 snf->quad_spi_op = true;
1610
1611 return 0;
1612}
1613
/*
 * Derive chip geometry from the probed flash info and program the
 * NFI/SNFI and ECC engine accordingly, then run per-die chip setup.
 *
 * Returns 0 on success or a negative error code if the geometry is
 * unsupported or any hardware/chip setup step fails.
 */
static int mtk_snand_setup(struct mtk_snand *snf,
			   const struct snand_flash_info *snand_info)
{
	const struct snand_mem_org *memorg = &snand_info->memorg;
	uint32_t i, msg_size, snfi_caps;
	int ret;

	/* Calculate flash memory organization */
	snf->model = snand_info->model;
	snf->writesize = memorg->pagesize;
	snf->oobsize = memorg->sparesize;
	snf->erasesize = snf->writesize * memorg->pages_per_block;
	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
	snf->size = snf->die_size * memorg->ndies;
	snf->num_dies = memorg->ndies;

	/* Masks/shifts assume power-of-two page/block/die sizes */
	snf->writesize_mask = snf->writesize - 1;
	snf->erasesize_mask = snf->erasesize - 1;
	snf->die_mask = snf->die_size - 1;

	snf->writesize_shift = ffs(snf->writesize) - 1;
	snf->erasesize_shift = ffs(snf->erasesize) - 1;
	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

	snf->select_die = snand_info->select_die;

	/* Determine opcodes for read from cache/program load */
	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
	if (snf->snfi_quad_spi)
		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
	if (ret)
		return ret;

	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
	if (ret)
		return ret;

	/* ECC and page format */
	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	ret = mtk_snand_pagefmt_setup(snf);
	if (ret)
		return ret;

	/* ECC message = one sector of data plus its ECC-protected FDM bytes */
	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
			    msg_size);
	if (ret)
		return ret;

	/* Start from a clean NFI configuration */
	nfi_write16(snf, NFI_CNFG, 0);

	/* Tuning options */
	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
	nfi_write32(snf, SNF_DLY_CTL3, (40 << SFCK_SAM_DLY_S));

	/* Interrupts: read-clear the status, then mask all sources */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Initialization on all dies */
	for (i = 0; i < snf->num_dies; i++) {
		mtk_snand_select_die(snf, i);

		/* Disable On-Die ECC engine */
		ret = mtk_snand_ondie_ecc_control(snf, false);
		if (ret)
			return ret;

		/* Disable block protection */
		mtk_snand_unlock(snf);

		/* Enable/disable quad-spi */
		mtk_snand_qspi_control(snf, snf->quad_spi_op);
	}

	/* Leave die 0 selected as the default */
	mtk_snand_select_die(snf, 0);

	return 0;
}
1706
1707static int mtk_snand_id_probe(struct mtk_snand *snf,
1708 const struct snand_flash_info **snand_info)
1709{
1710 uint8_t id[4], op[2];
1711 int ret;
1712
1713 /* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
1714 op[0] = SNAND_CMD_READID;
1715 op[1] = 0;
1716 ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
1717 if (ret)
1718 return ret;
1719
1720 *snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
1721 if (*snand_info)
1722 return 0;
1723
1724 /* Read SPI-NAND JEDEC ID, OP + ID */
1725 op[0] = SNAND_CMD_READID;
1726 ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
1727 if (ret)
1728 return ret;
1729
1730 *snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
1731 if (*snand_info)
1732 return 0;
1733
1734 snand_log_chip(snf->pdev,
1735 "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
1736 id[0], id[1], id[2], id[3]);
1737
1738 return -EINVAL;
1739}
1740
/*
 * Probe and initialize an MTK SPI-NAND controller instance.
 *
 * Resets the controller and chip using a temporary stack instance,
 * identifies the flash by JEDEC ID, then allocates the real instance
 * (with the sector bitfield and OOB cache appended in one allocation)
 * plus a separate DMA page buffer, and runs the full hardware setup.
 *
 * @dev:   opaque platform device handle passed back to platform callbacks
 * @pdata: register bases, SoC type and quad-SPI capability
 * @psnf:  on success, receives the newly created instance (owned by
 *         the caller; release with mtk_snand_cleanup())
 *
 * Returns 0 on success or a negative error code.
 */
int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
		   struct mtk_snand **psnf)
{
	const struct snand_flash_info *snand_info;
	uint32_t rawpage_size, sect_bf_size;
	struct mtk_snand tmpsnf, *snf;
	int ret;

	if (!pdata || !psnf)
		return -EINVAL;

	if (pdata->soc >= __SNAND_SOC_MAX) {
		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
			       pdata->soc);
		return -EINVAL;
	}

	/* Dummy instance only for initial reset and id probe */
	tmpsnf.nfi_base = pdata->nfi_base;
	tmpsnf.ecc_base = pdata->ecc_base;
	tmpsnf.soc = pdata->soc;
	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
	tmpsnf.pdev = dev;

	/* Switch to SNFI mode */
	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

	/* Reset SNFI & NFI */
	mtk_snand_mac_reset(&tmpsnf);
	mtk_nfi_reset(&tmpsnf);

	/* Reset SPI-NAND chip */
	ret = mtk_snand_chip_reset(&tmpsnf);
	if (ret) {
		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
		return ret;
	}

	/* Probe SPI-NAND flash by JEDEC ID */
	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
	if (ret)
		return ret;

	/* Raw page = data area plus the spare (OOB) area */
	rawpage_size = snand_info->memorg.pagesize +
		       snand_info->memorg.sparesize;

	/* Per-sector bitflip counters, sized for the SoC's worst case */
	sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
		       sizeof(*snf->sect_bf);

	/* Allocate memory for instance and cache */
	snf = generic_mem_alloc(dev,
				sizeof(*snf) + rawpage_size + sect_bf_size);
	if (!snf) {
		snand_log_chip(dev, "Failed to allocate memory for instance\n");
		return -ENOMEM;
	}

	/* Carve the single allocation: [instance][sect_bf][buf_cache] */
	snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
	snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);

	/* Allocate memory for DMA buffer */
	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
	if (!snf->page_cache) {
		generic_mem_free(dev, snf);
		snand_log_chip(dev,
			       "Failed to allocate memory for DMA buffer\n");
		return -ENOMEM;
	}

	/* Fill up instance */
	snf->pdev = dev;
	snf->nfi_base = pdata->nfi_base;
	snf->ecc_base = pdata->ecc_base;
	snf->soc = pdata->soc;
	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
	snf->snfi_quad_spi = pdata->quad_spi;

	/* Initialize SNFI & ECC engine */
	ret = mtk_snand_setup(snf, snand_info);
	if (ret) {
		/* Undo both allocations on failure */
		dma_mem_free(dev, snf->page_cache);
		generic_mem_free(dev, snf);
		return ret;
	}

	*psnf = snf;

	return 0;
}
1830
1831int mtk_snand_cleanup(struct mtk_snand *snf)
1832{
1833 if (!snf)
1834 return 0;
1835
1836 dma_mem_free(snf->pdev, snf->page_cache);
1837 generic_mem_free(snf->pdev, snf);
1838
1839 return 0;
1840}