// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"

/* NFI registers */
#define NFI_CNFG			0x000
#define CNFG_OP_MODE_S			12
#define CNFG_OP_MODE_CUST		6
#define CNFG_OP_MODE_PROGRAM		3
#define CNFG_AUTO_FMT_EN		BIT(9)
#define CNFG_HW_ECC_EN			BIT(8)
#define CNFG_DMA_BURST_EN		BIT(2)
#define CNFG_READ_MODE			BIT(1)
#define CNFG_DMA_MODE			BIT(0)

#define NFI_PAGEFMT			0x0004
#define NFI_SPARE_SIZE_LS_S		16
#define NFI_FDM_ECC_NUM_S		12
#define NFI_FDM_NUM_S			8
#define NFI_SPARE_SIZE_S		4
#define NFI_SEC_SEL_512			BIT(2)
#define NFI_PAGE_SIZE_S			0
#define NFI_PAGE_SIZE_512_2K		0
#define NFI_PAGE_SIZE_2K_4K		1
#define NFI_PAGE_SIZE_4K_8K		2
#define NFI_PAGE_SIZE_8K_16K		3

#define NFI_CON				0x008
#define CON_SEC_NUM_S			12
#define CON_BWR				BIT(9)
#define CON_BRD				BIT(8)
#define CON_NFI_RST			BIT(1)
#define CON_FIFO_FLUSH			BIT(0)

#define NFI_INTR_EN			0x010
#define NFI_INTR_STA			0x014
#define NFI_IRQ_INTR_EN			BIT(31)
#define NFI_IRQ_CUS_READ		BIT(8)
#define NFI_IRQ_CUS_PG			BIT(7)

#define NFI_CMD				0x020

#define NFI_STRDATA			0x040
#define STR_DATA			BIT(0)

#define NFI_STA				0x060
#define NFI_NAND_FSM			GENMASK(28, 24)
#define NFI_FSM				GENMASK(19, 16)
#define READ_EMPTY			BIT(12)

#define NFI_FIFOSTA			0x064
#define FIFO_WR_REMAIN_S		8
#define FIFO_RD_REMAIN_S		0

#define NFI_STRADDR			0x080

#define NFI_FDM0L			0x0a0
#define NFI_FDM0M			0x0a4
#define NFI_FDML(n)			(NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n)			(NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1			0x220
#define WBUF_EN				BIT(2)

#define NFI_MASTERSTA			0x224
#define MAS_ADDR			GENMASK(11, 9)
#define MAS_RD				GENMASK(8, 6)
#define MAS_WR				GENMASK(5, 3)
#define MAS_RDDLY			GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622		(MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define AHB_BUS_BUSY			BIT(1)
#define BUS_BUSY			BIT(0)
#define NFI_MASTERSTA_MASK_7986		(AHB_BUS_BUSY | BUS_BUSY)

/* SNFI registers */
#define SNF_MAC_CTL			0x500
#define MAC_XIO_SEL			BIT(4)
#define SF_MAC_EN			BIT(3)
#define SF_TRIG				BIT(2)
#define WIP_READY			BIT(1)
#define WIP				BIT(0)

#define SNF_MAC_OUTL			0x504
#define SNF_MAC_INL			0x508

#define SNF_RD_CTL2			0x510
#define DATA_READ_DUMMY_S		8
#define DATA_READ_CMD_S			0

#define SNF_RD_CTL3			0x514

#define SNF_PG_CTL1			0x524
#define PG_LOAD_CMD_S			8

#define SNF_PG_CTL2			0x528

#define SNF_MISC_CTL			0x538
#define SW_RST				BIT(28)
#define FIFO_RD_LTC_S			25
#define PG_LOAD_X4_EN			BIT(20)
#define DATA_READ_MODE_S		16
#define DATA_READ_MODE			GENMASK(18, 16)
#define DATA_READ_MODE_X1		0
#define DATA_READ_MODE_X2		1
#define DATA_READ_MODE_X4		2
#define DATA_READ_MODE_DUAL		5
#define DATA_READ_MODE_QUAD		6
#define PG_LOAD_CUSTOM_EN		BIT(7)
#define DATARD_CUSTOM_EN		BIT(6)
#define CS_DESELECT_CYC_S		0

#define SNF_MISC_CTL2			0x53c
#define PROGRAM_LOAD_BYTE_NUM_S		16
#define READ_DATA_BYTE_NUM_S		11

#define SNF_DLY_CTL3			0x548
#define SFCK_SAM_DLY_S			0

#define SNF_STA_CTL1			0x550
#define CUS_PG_DONE			BIT(28)
#define CUS_READ_DONE			BIT(27)
#define SPI_STATE_S			0
#define SPI_STATE			GENMASK(3, 0)

#define SNF_CFG				0x55c
#define SPI_MODE			BIT(0)

#define SNF_GPRAM			0x800
#define SNF_GPRAM_SIZE			0xa0

#define SNFI_POLL_INTERVAL		1000000

static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const uint8_t mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
	[SNAND_SOC_MT7622] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = false,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7629] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = true,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
	},
	[SNAND_SOC_MT7986] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
	},
};

static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
			     uint32_t set)
{
	uint32_t val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		val |= ((uint32_t)*data++) << (8 * (i % es));

		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}

static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}

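#if 0
/*
 * Illustrative sketch (not part of the driver): how nfi_write_data() packs
 * a byte stream into 32-bit little-endian register lanes. Writing the
 * 3-byte sequence { 0x0f, 0xa0, 0x5a } at SNF_GPRAM (0x800) results in a
 * single 32-bit write of 0x005aa00f to offset 0x800; byte n of the stream
 * lands in bits [8n+7:8n] of the word. nfi_read_data() is the exact
 * inverse.
 */
static void example_gpram_packing(struct mtk_snand *snf)
{
	const uint8_t out[3] = { 0x0f, 0xa0, 0x5a };
	uint8_t in[3];

	nfi_write_data(snf, SNF_GPRAM, out, sizeof(out));
	nfi_read_data(snf, SNF_GPRAM, in, sizeof(in));
	/* in[] now holds { 0x0f, 0xa0, 0x5a } again */
}
#endif
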
static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
	uint8_t tmp = *bm1;
	*bm1 = *bm2;
	*bm2 = tmp;
}

static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
		      snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[snf->writesize]);
}

static void mtk_snand_bm_swap(struct mtk_snand *snf)
{
	uint32_t buf_bbm_pos, fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	buf_bbm_pos = snf->writesize -
		      (snf->ecc_steps - 1) * snf->spare_per_sector;
	fdm_bbm_pos = snf->writesize +
		      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->nfi_soc->sector_size;
	fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
		       snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->writesize;
	fdm_bbm_pos2 = snf->writesize +
		       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

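/*
 * Worked example for the swaps above (illustrative, assuming a 2048+64
 * page with 512-byte sectors, 16-byte spare and 8-byte FDM, i.e.
 * raw_sector_size = 528 and ecc_steps = 4): in the NFI raw layout the
 * last sector's FDM starts at 3 * 528 + 512 = 2096, while the factory
 * bad-block marker of the physical page sits at offset writesize = 2048,
 * which falls inside the last sector's data area. The *_raw variants
 * exchange those two bytes in the raw layout; mtk_snand_bm_swap() and
 * mtk_snand_fdm_bm_swap() perform the equivalent swaps on the formatted
 * data + OOB layout, keeping the marker readable at the standard place.
 */
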
static int mtk_nfi_reset(struct mtk_snand *snf)
{
	uint32_t val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "NFI master is still busy after reset\n");
		return ret;
	}

	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	uint32_t val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
		    (10 << CS_DESELECT_CYC_S));

	return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
				 uint32_t inlen)
{
	int ret;
	uint32_t val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev,
			       "Timed out waiting for WIP cleared\n");
	}

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
		     uint8_t *in, uint32_t inlen)
{
	int ret;

	if (outlen + inlen > SNF_GPRAM_SIZE)
		return -EINVAL;

	mtk_snand_mac_reset(snf);

	nfi_write_data(snf, SNF_GPRAM, out, outlen);

	ret = mtk_snand_mac_trigger(snf, outlen, inlen);
	if (ret)
		return ret;

	if (!inlen)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);

	return 0;
}

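#if 0
/*
 * Illustrative sketch (not part of the driver): issuing a raw SPI op
 * through the SNFI MAC. The TX bytes are placed in GPRAM, the MAC shifts
 * out `outlen` bytes and clocks `inlen` bytes in right after them; here a
 * Read ID with one dummy/address byte returns up to 4 ID bytes.
 */
static int example_mac_read_id(struct mtk_snand *snf, uint8_t id[4])
{
	uint8_t op[2] = { SNAND_CMD_READID, 0 };

	return mtk_snand_mac_io(snf, op, sizeof(op), id, 4);
}
#endif
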
static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
{
	uint8_t op[2], val;
	int ret;

	op[0] = SNAND_CMD_GET_FEATURE;
	op[1] = (uint8_t)addr;

	ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
	if (ret)
		return ret;

	return val;
}

int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
{
	uint8_t op[3];

	op[0] = SNAND_CMD_SET_FEATURE;
	op[1] = (uint8_t)addr;
	op[2] = (uint8_t)val;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
{
	int val;
	mtk_snand_time_t time_start, tmo;

	time_start = timer_get_ticks();
	tmo = timer_time_to_tick(wait_us);

	do {
		val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
		if (!(val & SNAND_STATUS_OIP))
			return val & (SNAND_STATUS_ERASE_FAIL |
				      SNAND_STATUS_PROGRAM_FAIL);
	} while (!timer_is_timeout(time_start, tmo));

	return -ETIMEDOUT;
}

int mtk_snand_chip_reset(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_RESET;
	int ret;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0)
		return ret;

	return 0;
}

static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
				    uint8_t set)
{
	int val, newval;
	int ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	newval = (val & (~clr)) | set;

	if (newval == val)
		return 0;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
				    (uint8_t)newval);
	if (ret) {
		snand_log_chip(snf->pdev,
			       "Failed to set configuration feature\n");
		return ret;
	}

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	if (newval != val)
		return -ENOTSUPP;

	return 0;
}

static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable)
		ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
	else
		ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable) {
		ret = mtk_snand_config_feature(snf, 0,
					       SNAND_FEATURE_QUAD_ENABLE);
	} else {
		ret = mtk_snand_config_feature(snf,
					       SNAND_FEATURE_QUAD_ENABLE, 0);
	}

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_unlock(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
	if (ret) {
		snand_log_chip(snf->pdev, "Failed to set protection feature\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_write_enable(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_WRITE_ENABLE;
	int ret, val;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
	if (val < 0)
		return val;

	if (val & SNAND_STATUS_WEL)
		return 0;

	snand_log_chip(snf->pdev, "Failed to send write-enable command\n");

	return -ENOTSUPP;
}

static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	if (!snf->select_die)
		return 0;

	return snf->select_die(snf, dieidx);
}

static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
					     uint64_t addr)
{
	uint32_t dieidx;

	if (!snf->select_die)
		return addr;

	dieidx = addr >> snf->die_shift;

	mtk_snand_select_die(snf, dieidx);

	return addr & snf->die_mask;
}

static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
					    uint32_t page)
{
	uint32_t pages_per_block;

	pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);

	if (page & pages_per_block)
		return 1 << (snf->writesize_shift + 1);

	return 0;
}

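/*
 * Worked example (illustrative): with a 2 KiB page (writesize_shift = 11)
 * and 64 pages per block (erasesize_shift = 17), pages_per_block is 64,
 * so bit 6 of the page number selects the plane. For a page in an
 * odd-numbered block the function returns 1 << 12, i.e. the plane-select
 * bit that is OR-ed into the column address of two-plane devices.
 */
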
static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
{
	uint8_t op[4];

	op[0] = cmd;
	op[1] = (page >> 16) & 0xff;
	op[2] = (page >> 8) & 0xff;
	op[3] = page & 0xff;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
	uint32_t vall, valm;
	uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->nfi_soc->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->nfi_soc->fdm_size;
	}
}

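/*
 * Note on the register layout above: NFI_FDML(i) holds OOB bytes 0..3 of
 * sector i (byte 0 in bits [7:0]) and NFI_FDMM(i) holds bytes 4..7, so
 * e.g. FDML = 0x03020100 and FDMM = 0x07060504 yield the byte sequence
 * 0x00, 0x01, ..., 0x07 with fdm_size = 8.
 */
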
static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
	uint32_t coladdr, rwbytes, mode, len;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Command and dummy cycles */
	nfi_write32(snf, SNF_RD_CTL2,
		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
		    (snf->opcode_rfc << DATA_READ_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_RD_CTL3, coladdr);

	/* Set read mode */
	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE, mode | DATARD_CUSTOM_EN);

	/* Set bytes to read */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI read prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA read */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map from device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_decoder_start(snf);

	/* Prepare for custom read interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	/* Start DMA read */
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for reading from cache\n");
		goto cleanup;
	}

	if (!raw) {
		ret = mtk_ecc_wait_decoder_done(snf);
		if (ret)
			goto cleanup;

		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

		/*
		 * For new IPs, an ECC error may occur on empty pages.
		 * Use a specific indication bit to check for an empty page.
		 */
		if (snf->nfi_soc->empty_page_check &&
		    (nfi_read32(snf, NFI_STA) & READ_EMPTY))
			ret = 0;
		else
			ret = mtk_ecc_check_decode_error(snf, page);

		mtk_snand_ecc_decoder_stop(snf);
	}

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, false);

	/* Stop read */
	nfi_write32(snf, NFI_CON, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);

	return ret;
}

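/*
 * Note on the flow above: a custom cache read programs the opcode/dummy
 * cycles and column address into SNF_RD_CTL2/3, switches SNF_MISC_CTL into
 * custom data-read mode, configures the NFI for a DMA transfer of
 * ecc_steps * raw_sector_size bytes into page_cache, then triggers
 * NFI_CMD/NFI_CON/NFI_STRDATA and waits for CUS_READ_DONE. With ECC
 * enabled, the decoder runs in-line and the per-sector FDM bytes are
 * collected from the NFI_FDMx registers afterwards.
 */
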
static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;

	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			memcpy(eccptr, raw_sector, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

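/*
 * Raw page layout unpacked above (illustrative, assuming 512-byte sectors,
 * 8-byte FDM and 16-byte spare per sector):
 *
 *   page_cache: [ data0 512B | fdm0 8B | ecc0 8B ] [ data1 | fdm1 | ecc1 ] ...
 *   buf:        data0 .. dataN concatenated
 *   oob:        fdm0 .. fdmN, followed by ecc0 .. eccN
 *
 * i.e. all user OOB (FDM) bytes are gathered in front of the ECC parity.
 */
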
static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
				  void *buf, void *oob, bool raw, bool format)
{
	uint64_t die_addr;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
		return ret;
	}

	ret = mtk_snand_read_cache(snf, page, raw);
	if (ret < 0 && ret != -EBADMSG)
		return ret;

	if (raw) {
		if (format) {
			mtk_snand_bm_swap_raw(snf);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_from_raw_page(snf, buf, oob);
		} else {
			if (buf)
				memcpy(buf, snf->page_cache, snf->writesize);

			if (oob) {
				memset(oob, 0xff, snf->oobsize);
				memcpy(oob, snf->page_cache + snf->writesize,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		mtk_snand_bm_swap(snf);
		mtk_snand_fdm_bm_swap(snf);

		if (buf)
			memcpy(buf, snf->page_cache, snf->writesize);

		if (oob) {
			memset(oob, 0xff, snf->oobsize);
			memcpy(oob, snf->page_cache + snf->writesize,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}
	}

	return ret;
}

int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
			void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
	uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
	const uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
						<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
						<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
				  bool raw)
{
	uint32_t coladdr, rwbytes, mode, len;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Write FDM registers if necessary */
	if (!raw)
		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

	/* Command */
	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_PG_CTL2, coladdr);

	/* Set write mode */
	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

	/* Set bytes to write */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI write prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA write */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map to device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_encoder_start(snf);

	/* Prepare for custom write interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	/* Start DMA write */
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for program load\n");
		goto cleanup;
	}

	if (!raw)
		mtk_snand_ecc_encoder_stop(snf);

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, true);

	/* Stop write */
	nfi_write32(snf, NFI_CON, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_to_raw_page(struct mtk_snand *snf,
				  const void *buf, const void *oob,
				  bool empty_ecc)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	const uint8_t *bufptr = buf, *oobptr = oob;
	uint8_t *raw_sector;

	memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			if (empty_ecc)
				memset(raw_sector, 0xff, ecc_bytes);
			else
				memcpy(raw_sector, eccptr, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
				    const void *oob)
{
	const uint8_t *p = buf;
	uint32_t i, j;

	if (buf) {
		for (i = 0; i < snf->writesize; i++) {
			if (p[i] != 0xff)
				return false;
		}
	}

	if (oob) {
		for (j = 0; j < snf->ecc_steps; j++) {
			p = oob + j * snf->nfi_soc->fdm_size;

			for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
				if (p[i] != 0xff)
					return false;
			}
		}
	}

	return true;
}

static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
				   const void *buf, const void *oob,
				   bool raw, bool format)
{
	uint64_t die_addr;
	bool empty_ecc = false;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
		/*
		 * If the page data to be ECC-protected is all 0xff,
		 * switch to raw write mode.
		 */
		raw = true;
		format = true;

		/* Fill the ECC parity region with 0xff */
		empty_ecc = true;
	}

	if (raw) {
		if (format) {
			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_bm_swap_raw(snf);
		} else {
			memset(snf->page_cache, 0xff,
			       snf->writesize + snf->oobsize);

			if (buf)
				memcpy(snf->page_cache, buf, snf->writesize);

			if (oob) {
				memcpy(snf->page_cache + snf->writesize, oob,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
		if (buf)
			memcpy(snf->page_cache, buf, snf->writesize);

		if (oob) {
			memcpy(snf->page_cache + snf->writesize, oob,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}

		mtk_snand_fdm_bm_swap(snf);
		mtk_snand_bm_swap(snf);
	}

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_program_load(snf, page, raw);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Page program command timed out on page %u\n",
			       page);
		return ret;
	}

	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
		snand_log_chip(snf->pdev,
			       "Page program failed on page %u\n", page);
		return -EIO;
	}

	return 0;
}

int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
			 const void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
}

int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
	uint64_t die_addr;
	uint32_t page, block;
	int ret;

	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	die_addr = mtk_snand_select_die_address(snf, addr);
	block = die_addr >> snf->erasesize_shift;
	page = block << (snf->erasesize_shift - snf->writesize_shift);

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Block erase command timed out on block %u\n",
			       block);
		return ret;
	}

	if (ret & SNAND_STATUS_ERASE_FAIL) {
		snand_log_chip(snf->pdev,
			       "Block erase failed on block %u\n", block);
		return -EIO;
	}

	return 0;
}

static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     false);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     true);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_isbad_std(snf, addr);

	return mtk_snand_block_isbad_mtk(snf, addr);
}

static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
{
	/* Standard BBM position */
	memset(snf->buf_cache, 0xff, snf->oobsize);
	snf->buf_cache[0] = 0;

	return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
				       false);
}

static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	/* Write the whole page with zeros */
	memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);

	return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
				       snf->buf_cache + snf->writesize, true,
				       true);
}

int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_markbad_std(snf, addr);

	return mtk_snand_block_markbad_mtk(snf, addr);
}

int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
		       const uint8_t *oobbuf, size_t ooblen)
{
	size_t len = ooblen, sect_fdm_len;
	const uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
			   size_t ooblen, const uint8_t *oobraw)
{
	size_t len = ooblen, sect_fdm_len;
	uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

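#if 0
/*
 * Illustrative sketch (not part of the driver): how the auto-OOB helpers
 * map a linear OOB buffer onto the per-sector FDM slots. Byte 0 of every
 * sector's FDM is reserved for the bad-block marker, so with fdm_size = 8
 * each sector contributes 7 usable bytes: a 14-byte linear OOB buffer on a
 * page with at least two sectors fills raw bytes 1..7 and 9..15, and
 * mtk_snand_transfer_oob() reads them back in the same order.
 */
static void example_auto_oob(struct mtk_snand *snf, uint8_t *oobraw)
{
	uint8_t linear[14] = { 0 };	/* user OOB payload */

	memset(oobraw, 0xff, snf->oobsize);
	mtk_snand_fill_oob(snf, oobraw, linear, sizeof(linear));
}
#endif
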
int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				 void *buf, void *oob, size_t ooblen,
				 size_t *actualooblen, bool raw)
{
	int ret, oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_read_page(snf, addr, buf, NULL, raw);

	ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
	if (ret && ret != -EBADMSG) {
		if (actualooblen)
			*actualooblen = 0;
		return ret;
	}

	oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return ret;
}

int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				  const void *buf, const void *oob,
				  size_t ooblen, size_t *actualooblen, bool raw)
{
	int oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_write_page(snf, addr, buf, NULL, raw);

	memset(snf->buf_cache, 0xff, snf->oobsize);
	oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
}

int mtk_snand_get_chip_info(struct mtk_snand *snf,
			    struct mtk_snand_chip_info *info)
{
	if (!snf || !info)
		return -EINVAL;

	info->model = snf->model;
	info->chipsize = snf->size;
	info->blocksize = snf->erasesize;
	info->pagesize = snf->writesize;
	info->sparesize = snf->oobsize;
	info->spare_per_sector = snf->spare_per_sector;
	info->fdm_size = snf->nfi_soc->fdm_size;
	info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
	info->num_sectors = snf->ecc_steps;
	info->sector_size = snf->nfi_soc->sector_size;
	info->ecc_strength = snf->ecc_strength;
	info->ecc_bytes = snf->ecc_bytes;

	return 0;
}

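#if 0
/*
 * Illustrative sketch (not part of the driver): querying chip geometry.
 * Assumes a printf-style logger is available to the caller.
 */
static void example_dump_chip_info(struct mtk_snand *snf)
{
	struct mtk_snand_chip_info info;

	if (!mtk_snand_get_chip_info(snf, &info))
		printf("%s: %uB page + %uB spare, %u sectors, ECC %u bits\n",
		       info.model, info.pagesize, info.sparesize,
		       info.num_sectors, info.ecc_strength);
}
#endif
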
int mtk_snand_irq_process(struct mtk_snand *snf)
{
	uint32_t sta, ien;

	if (!snf)
		return -EINVAL;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return 0;

	nfi_write32(snf, NFI_INTR_EN, 0);
	irq_completion_done(snf->pdev);

	return 1;
}

static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
{
	uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
	int i, mul = 1;

	/*
	 * If we're using the 1KB sector size, HW will automatically
	 * double the spare size. So we should only use half of the value.
	 */
	if (snf->nfi_soc->sector_size == 1024)
		mul = 2;

	spare_per_step /= mul;

	for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
		if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
			snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
			snf->spare_per_sector *= mul;
			return i;
		}
	}

	snand_log_nfi(snf->pdev,
		      "Page size %u+%u is not supported\n", snf->writesize,
		      snf->oobsize);

	return -1;
}

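/*
 * Worked example (illustrative): a 2048+64 page on a 512-byte-sector SoC
 * gives ecc_steps = 4 and spare_per_step = 64 / 4 = 16, so the search
 * above picks table entry 16 at index 0, which is later programmed into
 * NFI_PAGEFMT. With 1024-byte sectors the hardware doubles the per-sector
 * spare, hence the halving before the search and the doubling afterwards.
 */
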
static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
{
	uint32_t spare_size_shift, pagesize_idx;
	uint32_t sector_size_512;
	int spare_size_idx;

	if (snf->nfi_soc->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (snf->writesize) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	spare_size_idx = mtk_snand_select_spare_per_sector(snf);
	if (unlikely(spare_size_idx < 0))
		return -ENOTSUPP;

	snf->raw_sector_size = snf->nfi_soc->sector_size +
			       snf->spare_per_sector;

	/* Setup page format */
	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
		    (spare_size_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	return 0;
}

static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
						   uint32_t snfi_caps,
						   uint8_t *opcode,
						   uint8_t *dummy,
						   const struct snand_io_cap *op_cap)
{
	uint32_t i, caps;

	caps = snfi_caps & op_cap->caps;

	i = fls(caps);
	if (i > 0) {
		*opcode = op_cap->opcodes[i - 1].opcode;
		if (dummy)
			*dummy = op_cap->opcodes[i - 1].dummy;
		return i - 1;
	}

	return __SNAND_IO_MAX;
}

static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
				       uint32_t snfi_caps,
				       const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
		[SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
		[SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
		[SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
		[SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
				      &snf->dummy_rfc, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for read from cache\n");
		return -ENOTSUPP;
	}

	snf->mode_rfc = rfc_modes[idx];

	if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
				      const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t pl_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = 0,
		[SNAND_IO_1_1_4] = 1,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
				      NULL, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for program load\n");
		return -ENOTSUPP;
	}

	snf->mode_pl = pl_modes[idx];

	if (idx == SNAND_IO_1_1_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_setup(struct mtk_snand *snf,
			   const struct snand_flash_info *snand_info)
{
	const struct snand_mem_org *memorg = &snand_info->memorg;
	uint32_t i, msg_size, snfi_caps;
	int ret;

	/* Calculate flash memory organization */
	snf->model = snand_info->model;
	snf->writesize = memorg->pagesize;
	snf->oobsize = memorg->sparesize;
	snf->erasesize = snf->writesize * memorg->pages_per_block;
	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
	snf->size = snf->die_size * memorg->ndies;
	snf->num_dies = memorg->ndies;

	snf->writesize_mask = snf->writesize - 1;
	snf->erasesize_mask = snf->erasesize - 1;
	snf->die_mask = snf->die_size - 1;

	snf->writesize_shift = ffs(snf->writesize) - 1;
	snf->erasesize_shift = ffs(snf->erasesize) - 1;
	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

	snf->select_die = snand_info->select_die;

	/* Determine opcodes for read from cache/program load */
	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
	if (snf->snfi_quad_spi)
		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
	if (ret)
		return ret;

	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
	if (ret)
		return ret;

	/* ECC and page format */
	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	ret = mtk_snand_pagefmt_setup(snf);
	if (ret)
		return ret;

	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
			    msg_size);
	if (ret)
		return ret;

	nfi_write16(snf, NFI_CNFG, 0);

	/* Tuning options */
	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
	nfi_write32(snf, SNF_DLY_CTL3, (40 << SFCK_SAM_DLY_S));

	/* Interrupts */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Initialization on all dies */
	for (i = 0; i < snf->num_dies; i++) {
		mtk_snand_select_die(snf, i);

		/* Disable On-Die ECC engine */
		ret = mtk_snand_ondie_ecc_control(snf, false);
		if (ret)
			return ret;

		/* Disable block protection */
		mtk_snand_unlock(snf);

		/* Enable/disable quad-spi */
		mtk_snand_qspi_control(snf, snf->quad_spi_op);
	}

	mtk_snand_select_die(snf, 0);

	return 0;
}

static int mtk_snand_id_probe(struct mtk_snand *snf,
			      const struct snand_flash_info **snand_info)
{
	uint8_t id[4], op[2];
	int ret;

	/* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
	op[0] = SNAND_CMD_READID;
	op[1] = 0;
	ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	/* Read SPI-NAND JEDEC ID, OP + ID */
	op[0] = SNAND_CMD_READID;
	ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	snand_log_chip(snf->pdev,
		       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
		       id[0], id[1], id[2], id[3]);

	return -EINVAL;
}

int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
		   struct mtk_snand **psnf)
{
	const struct snand_flash_info *snand_info;
	struct mtk_snand tmpsnf, *snf;
	uint32_t rawpage_size;
	int ret;

	if (!pdata || !psnf)
		return -EINVAL;

	if (pdata->soc >= __SNAND_SOC_MAX) {
		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
			       pdata->soc);
		return -EINVAL;
	}

	/* Dummy instance only for initial reset and id probe */
	tmpsnf.nfi_base = pdata->nfi_base;
	tmpsnf.ecc_base = pdata->ecc_base;
	tmpsnf.soc = pdata->soc;
	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
	tmpsnf.pdev = dev;

	/* Switch to SNFI mode */
	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

	/* Reset SNFI & NFI */
	mtk_snand_mac_reset(&tmpsnf);
	mtk_nfi_reset(&tmpsnf);

	/* Reset SPI-NAND chip */
	ret = mtk_snand_chip_reset(&tmpsnf);
	if (ret) {
		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
		return ret;
	}

	/* Probe SPI-NAND flash by JEDEC ID */
	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
	if (ret)
		return ret;

	rawpage_size = snand_info->memorg.pagesize +
		       snand_info->memorg.sparesize;

	/* Allocate memory for instance and cache */
	snf = generic_mem_alloc(dev, sizeof(*snf) + rawpage_size);
	if (!snf) {
		snand_log_chip(dev, "Failed to allocate memory for instance\n");
		return -ENOMEM;
	}

	snf->buf_cache = (uint8_t *)((uintptr_t)snf + sizeof(*snf));

	/* Allocate memory for DMA buffer */
	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
	if (!snf->page_cache) {
		generic_mem_free(dev, snf);
		snand_log_chip(dev,
			       "Failed to allocate memory for DMA buffer\n");
		return -ENOMEM;
	}

	/* Fill up instance */
	snf->pdev = dev;
	snf->nfi_base = pdata->nfi_base;
	snf->ecc_base = pdata->ecc_base;
	snf->soc = pdata->soc;
	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
	snf->snfi_quad_spi = pdata->quad_spi;

	/* Initialize SNFI & ECC engine */
	ret = mtk_snand_setup(snf, snand_info);
	if (ret) {
		dma_mem_free(dev, snf->page_cache);
		generic_mem_free(dev, snf);
		return ret;
	}

	*psnf = snf;

	return 0;
}

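#if 0
/*
 * Illustrative sketch (not part of the driver): bringing the controller up
 * and reading the first page. The platdata fields and SNAND_SOC_MT7622 come
 * from mtk-snand-def.h; `nfi_regs`/`ecc_regs` are placeholders for the
 * caller's register bases.
 */
static int example_init_and_read(void *dev, void *nfi_regs, void *ecc_regs)
{
	struct mtk_snand_platdata pdata = {
		.nfi_base = nfi_regs,
		.ecc_base = ecc_regs,
		.soc = SNAND_SOC_MT7622,
		.quad_spi = true,
	};
	struct mtk_snand_chip_info info;
	struct mtk_snand *snf;
	uint8_t *buf;
	int ret;

	ret = mtk_snand_init(dev, &pdata, &snf);
	if (ret)
		return ret;

	mtk_snand_get_chip_info(snf, &info);

	buf = generic_mem_alloc(dev, info.pagesize);
	if (!buf) {
		mtk_snand_cleanup(snf);
		return -ENOMEM;
	}

	/* ECC-protected read of page 0 */
	ret = mtk_snand_read_page(snf, 0, buf, NULL, false);

	generic_mem_free(dev, buf);
	mtk_snand_cleanup(snf);

	return ret;
}
#endif
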
int mtk_snand_cleanup(struct mtk_snand *snf)
{
	if (!snf)
		return 0;

	dma_mem_free(snf->pdev, snf->page_cache);
	generic_mem_free(snf->pdev, snf);

	return 0;
}