// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"

/* NFI registers */
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM GENMASK(28, 24)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

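/*
 * Each sector owns an 8-byte FDM (spare) region exposed through a pair of
 * 32-bit registers: NFI_FDML(n) holds bytes 0-3, NFI_FDMM(n) bytes 4-7.
 */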
#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define AHB_BUS_BUSY BIT(1)
#define BUS_BUSY BIT(0)
#define NFI_MASTERSTA_MASK_7981 (AHB_BUS_BUSY | BUS_BUSY)
#define NFI_MASTERSTA_MASK_7986 (AHB_BUS_BUSY | BUS_BUSY)

/* SNFI registers */
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define LATCH_LAT_S 8
#define LATCH_LAT GENMASK(9, 8)
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

#define SNFI_POLL_INTERVAL 1000000

static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const uint8_t mt7981_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const uint8_t mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
	[SNAND_SOC_MT7622] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = false,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7629] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = true,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7981] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7981,
		.spare_sizes = mt7981_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7981_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7986] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7988] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
};

static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
			     uint32_t set)
{
	uint32_t val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

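/*
 * The SNFI GPRAM is accessed in aligned 32-bit words; the helpers below
 * pack outgoing byte streams into such words and unpack incoming ones.
 */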
static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		val |= ((uint32_t)*data++) << (8 * (i % es));

		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}

static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}

static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
	uint8_t tmp = *bm1;
	*bm1 = *bm2;
	*bm2 = tmp;
}

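/*
 * On SoCs with bbm_swap set, the NFI's interleaved page layout places
 * ordinary sector data at the chip's standard bad block marker position
 * (offset ->writesize). The swap helpers below exchange that byte with a
 * byte in the last sector's FDM region so the marker is preserved in both
 * the raw and the ECC-formatted views of a page.
 */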
static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
		      snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[snf->writesize]);
}

static void mtk_snand_bm_swap(struct mtk_snand *snf)
{
	uint32_t buf_bbm_pos, fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	buf_bbm_pos = snf->writesize -
		      (snf->ecc_steps - 1) * snf->spare_per_sector;
	fdm_bbm_pos = snf->writesize +
		      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->nfi_soc->sector_size;
	fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
		       snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->writesize;
	fdm_bbm_pos2 = snf->writesize +
		       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
	uint32_t val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "NFI master is still busy after reset\n");
		return ret;
	}

	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	uint32_t val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
		    (10 << CS_DESELECT_CYC_S) |
		    (snf->nfi_soc->latch_lat << LATCH_LAT_S));

	return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
				 uint32_t inlen)
{
	int ret;
	uint32_t val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev,
			       "Timed out waiting for WIP cleared\n");
	}

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
		     uint8_t *in, uint32_t inlen)
{
	int ret;

	if (outlen + inlen > SNF_GPRAM_SIZE)
		return -EINVAL;

	mtk_snand_mac_reset(snf);

	nfi_write_data(snf, SNF_GPRAM, out, outlen);

	ret = mtk_snand_mac_trigger(snf, outlen, inlen);
	if (ret)
		return ret;

	if (!inlen)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);

	return 0;
}

static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
{
	uint8_t op[2], val;
	int ret;

	op[0] = SNAND_CMD_GET_FEATURE;
	op[1] = (uint8_t)addr;

	ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
	if (ret)
		return ret;

	return val;
}

int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
{
	uint8_t op[3];

	op[0] = SNAND_CMD_SET_FEATURE;
	op[1] = (uint8_t)addr;
	op[2] = (uint8_t)val;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

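/*
 * Poll the status feature register until the OIP (operation in progress)
 * bit clears, returning the erase/program fail flags, or -ETIMEDOUT.
 */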
static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
{
	int val;
	mtk_snand_time_t time_start, tmo;

	time_start = timer_get_ticks();
	tmo = timer_time_to_tick(wait_us);

	do {
		val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
		if (!(val & SNAND_STATUS_OIP))
			return val & (SNAND_STATUS_ERASE_FAIL |
				      SNAND_STATUS_PROGRAM_FAIL);
	} while (!timer_is_timeout(time_start, tmo));

	return -ETIMEDOUT;
}

int mtk_snand_chip_reset(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_RESET;
	int ret;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0)
		return ret;

	return 0;
}

static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
				    uint8_t set)
{
	int val, newval;
	int ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	newval = (val & (~clr)) | set;

	if (newval == val)
		return 0;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
				    (uint8_t)newval);
	if (ret) {
		snand_log_chip(snf->pdev,
			       "Failed to set configuration feature\n");
		return ret;
	}

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	if (newval != val)
		return -ENOTSUPP;

	return 0;
}

static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable)
		ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
	else
		ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable) {
		ret = mtk_snand_config_feature(snf, 0,
					       SNAND_FEATURE_QUAD_ENABLE);
	} else {
		ret = mtk_snand_config_feature(snf,
					       SNAND_FEATURE_QUAD_ENABLE, 0);
	}

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_unlock(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
	if (ret) {
		snand_log_chip(snf->pdev, "Failed to set protection feature\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_write_enable(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_WRITE_ENABLE;
	int ret, val;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
	if (val < 0)
		return val;

	if (val & SNAND_STATUS_WEL)
		return 0;

	snand_log_chip(snf->pdev, "Failed to send write-enable command\n");

	return -ENOTSUPP;
}

static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	if (!snf->select_die)
		return 0;

	return snf->select_die(snf, dieidx);
}

static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
					     uint64_t addr)
{
	uint32_t dieidx;

	if (!snf->select_die)
		return addr;

	dieidx = addr >> snf->die_shift;

	mtk_snand_select_die(snf, dieidx);

	return addr & snf->die_mask;
}

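/*
 * Two-plane devices take a plane-select bit ORed into the column address:
 * pages in odd-numbered blocks set bit (writesize_shift + 1).
 */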
static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
					    uint32_t page)
{
	uint32_t pages_per_block;

	pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);

	if (page & pages_per_block)
		return 1 << (snf->writesize_shift + 1);

	return 0;
}

static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
{
	uint8_t op[4];

	op[0] = cmd;
	op[1] = (page >> 16) & 0xff;
	op[2] = (page >> 8) & 0xff;
	op[3] = page & 0xff;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
	uint32_t vall, valm;
	uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->nfi_soc->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->nfi_soc->fdm_size;
	}
}

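/*
 * Re-read just the ECC parity bytes of one sector from the chip cache so
 * that an erased (all-0xff) sector can be told apart from a corrupt one.
 */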
static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
				     uint32_t sect, uint8_t *oob)
{
	uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint32_t coladdr, raw_offs, offs;
	uint8_t op[4];

	if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
		snand_log_snfi(snf->pdev,
			       "ECC parity size does not fit the GPRAM\n");
		return -ENOTSUPP;
	}

	raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
		   snf->nfi_soc->fdm_size;
	offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;

	/* Column address with plane bit */
	coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);

	op[0] = SNAND_CMD_READ_FROM_CACHE;
	op[1] = (coladdr >> 8) & 0xff;
	op[2] = coladdr & 0xff;
	op[3] = 0;

	return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
}

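/*
 * sect_bf[i] < 0 marks a sector the ECC engine reported as uncorrectable;
 * fetch its parity and let mtk_ecc_fixup_empty_sector() decide whether it
 * is actually an erased sector before declaring -EBADMSG.
 */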
static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
{
	uint8_t *oob = snf->page_cache + snf->writesize;
	int i, rc, ret = 0, max_bitflips = 0;

	for (i = 0; i < snf->ecc_steps; i++) {
		if (snf->sect_bf[i] >= 0) {
			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];
			continue;
		}

		rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
		if (rc)
			return rc;

		rc = mtk_ecc_fixup_empty_sector(snf, i);
		if (rc < 0) {
			ret = -EBADMSG;

			snand_log_ecc(snf->pdev,
				      "Uncorrectable bitflips in page %u sect %u\n",
				      page, i);
		} else if (rc) {
			snf->sect_bf[i] = rc;

			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];

			snand_log_ecc(snf->pdev,
				      "%u bitflip%s corrected in page %u sect %u\n",
				      rc, rc > 1 ? "s" : "", page, i);
		} else {
			snf->sect_bf[i] = 0;
		}
	}

	return ret ? ret : max_bitflips;
}

static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Command and dummy cycles */
	nfi_write32(snf, SNF_RD_CTL2,
		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
		    (snf->opcode_rfc << DATA_READ_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_RD_CTL3, coladdr);

	/* Set read mode */
	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
		  mode | DATARD_CUSTOM_EN |
		  (snf->nfi_soc->latch_lat << LATCH_LAT_S));

	/* Set bytes to read */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI read prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA read */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map from device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_decoder_start(snf);

	/* Prepare for custom read interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	/* Start DMA read */
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for reading from cache\n");
		goto cleanup;
	}

	/* Wait for BUS_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				  BUS_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup;
	}

	/* Wait for bus becoming idle */
	ret = read32_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask),
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for bus becoming idle\n");
		goto cleanup;
	}

	if (!raw) {
		ret = mtk_ecc_wait_decoder_done(snf);
		if (ret)
			goto cleanup;

		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

		mtk_ecc_check_decode_error(snf);
		mtk_snand_ecc_decoder_stop(snf);

		ret = mtk_snand_check_ecc_result(snf, page);
	}

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, false);

	/* Stop read */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN | LATCH_LAT, 0);

	return ret;
}

static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;

	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			memcpy(eccptr, raw_sector, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
				  void *buf, void *oob, bool raw, bool format)
{
	uint64_t die_addr;
	uint32_t page, dly_ctrl3;
	int ret, retry_cnt = 0;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	dly_ctrl3 = nfi_read32(snf, SNF_DLY_CTL3);

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
		return ret;
	}

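	/*
	 * Simple read calibration: on uncorrectable ECC errors, step the
	 * SFCK sample delay and retry up to 16 times, restoring the
	 * original delay if no setting works.
	 */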
retry:
	ret = mtk_snand_read_cache(snf, page, raw);
	if (ret < 0 && ret != -EBADMSG)
		return ret;

	if (ret == -EBADMSG && retry_cnt < 16) {
		nfi_write32(snf, SNF_DLY_CTL3, retry_cnt * 2);
		retry_cnt++;
		goto retry;
	}

	if (retry_cnt) {
		if (ret == -EBADMSG) {
			nfi_write32(snf, SNF_DLY_CTL3, dly_ctrl3);
			snand_log_chip(snf->pdev,
				       "NFI calibration failed. Original sample delay: 0x%x\n",
				       dly_ctrl3);
		} else {
			snand_log_chip(snf->pdev,
				       "NFI calibration passed. New sample delay: 0x%x\n",
				       nfi_read32(snf, SNF_DLY_CTL3));
		}
	}

	if (raw) {
		if (format) {
			mtk_snand_bm_swap_raw(snf);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_from_raw_page(snf, buf, oob);
		} else {
			if (buf)
				memcpy(buf, snf->page_cache, snf->writesize);

			if (oob) {
				memset(oob, 0xff, snf->oobsize);
				memcpy(oob, snf->page_cache + snf->writesize,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		mtk_snand_bm_swap(snf);
		mtk_snand_fdm_bm_swap(snf);

		if (buf)
			memcpy(buf, snf->page_cache, snf->writesize);

		if (oob) {
			memset(oob, 0xff, snf->oobsize);
			memcpy(oob, snf->page_cache + snf->writesize,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}
	}

	return ret;
}

int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
			void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
	uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
	const uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
						<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
						<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
				  bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Write FDM registers if necessary */
	if (!raw)
		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

	/* Command */
	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_PG_CTL2, coladdr);

	/* Set write mode */
	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

	/* Set bytes to write */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI write prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA write */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map to device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_encoder_start(snf);

	/* Prepare for custom write interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	/* Start DMA write */
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for program load\n");
		goto cleanup;
	}

	/* Wait for NFI_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
				  NFI_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for NFI_SEC_CNTR\n");
		goto cleanup;
	}

	if (!raw)
		mtk_snand_ecc_encoder_stop(snf);

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, true);

	/* Stop write */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_to_raw_page(struct mtk_snand *snf,
				  const void *buf, const void *oob,
				  bool empty_ecc)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	const uint8_t *bufptr = buf, *oobptr = oob;
	uint8_t *raw_sector;

	memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			if (empty_ecc)
				memset(raw_sector, 0xff, ecc_bytes);
			else
				memcpy(raw_sector, eccptr, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
				    const void *oob)
{
	const uint8_t *p = buf;
	uint32_t i, j;

	if (buf) {
		for (i = 0; i < snf->writesize; i++) {
			if (p[i] != 0xff)
				return false;
		}
	}

	if (oob) {
		for (j = 0; j < snf->ecc_steps; j++) {
			p = oob + j * snf->nfi_soc->fdm_size;

			for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
				if (p[i] != 0xff)
					return false;
			}
		}
	}

	return true;
}

static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
				   const void *buf, const void *oob,
				   bool raw, bool format)
{
	uint64_t die_addr;
	bool empty_ecc = false;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
		/*
		 * If the data in the page to be ecc-ed is full 0xff,
		 * change to raw write mode
		 */
		raw = true;
		format = true;

		/* fill ecc parity code region with 0xff */
		empty_ecc = true;
	}

	if (raw) {
		if (format) {
			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_bm_swap_raw(snf);
		} else {
			memset(snf->page_cache, 0xff,
			       snf->writesize + snf->oobsize);

			if (buf)
				memcpy(snf->page_cache, buf, snf->writesize);

			if (oob) {
				memcpy(snf->page_cache + snf->writesize, oob,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
		if (buf)
			memcpy(snf->page_cache, buf, snf->writesize);

		if (oob) {
			memcpy(snf->page_cache + snf->writesize, oob,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}

		mtk_snand_fdm_bm_swap(snf);
		mtk_snand_bm_swap(snf);
	}

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_program_load(snf, page, raw);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Page program command timed out on page %u\n",
			       page);
		return ret;
	}

	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
		snand_log_chip(snf->pdev,
			       "Page program failed on page %u\n", page);
		return -EIO;
	}

	return 0;
}

int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
			 const void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
}

int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
	uint64_t die_addr;
	uint32_t page, block;
	int ret;

	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	die_addr = mtk_snand_select_die_address(snf, addr);
	block = die_addr >> snf->erasesize_shift;
	page = block << (snf->erasesize_shift - snf->writesize_shift);

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Block erase command timed out on block %u\n",
			       block);
		return ret;
	}

	if (ret & SNAND_STATUS_ERASE_FAIL) {
		snand_log_chip(snf->pdev,
			       "Block erase failed on block %u\n", block);
		return -EIO;
	}

	return 0;
}

static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     false);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     true);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_isbad_std(snf, addr);

	return mtk_snand_block_isbad_mtk(snf, addr);
}

static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
{
	/* Standard BBM position */
	memset(snf->buf_cache, 0xff, snf->oobsize);
	snf->buf_cache[0] = 0;

	return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
				       false);
}

static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	/* Write the whole page with zeros */
	memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);

	return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
				       snf->buf_cache + snf->writesize, true,
				       true);
}

int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_markbad_std(snf, addr);

	return mtk_snand_block_markbad_mtk(snf, addr);
}

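/*
 * Byte 0 of each sector's FDM region is reserved for the bad block marker,
 * so user OOB data is packed from offset 1 onwards.
 */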
int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
		       const uint8_t *oobbuf, size_t ooblen)
{
	size_t len = ooblen, sect_fdm_len;
	const uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
			   size_t ooblen, const uint8_t *oobraw)
{
	size_t len = ooblen, sect_fdm_len;
	uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				 void *buf, void *oob, size_t ooblen,
				 size_t *actualooblen, bool raw)
{
	int ret, oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_read_page(snf, addr, buf, NULL, raw);

	ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
	if (ret && ret != -EBADMSG) {
		if (actualooblen)
			*actualooblen = 0;
		return ret;
	}

	oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return ret;
}

int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				  const void *buf, const void *oob,
				  size_t ooblen, size_t *actualooblen, bool raw)
{
	int oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_write_page(snf, addr, buf, NULL, raw);

	memset(snf->buf_cache, 0xff, snf->oobsize);
	oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
}

int mtk_snand_get_chip_info(struct mtk_snand *snf,
			    struct mtk_snand_chip_info *info)
{
	if (!snf || !info)
		return -EINVAL;

	info->model = snf->model;
	info->chipsize = snf->size;
	info->blocksize = snf->erasesize;
	info->pagesize = snf->writesize;
	info->sparesize = snf->oobsize;
	info->spare_per_sector = snf->spare_per_sector;
	info->fdm_size = snf->nfi_soc->fdm_size;
	info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
	info->num_sectors = snf->ecc_steps;
	info->sector_size = snf->nfi_soc->sector_size;
	info->ecc_strength = snf->ecc_strength;
	info->ecc_bytes = snf->ecc_bytes;

	return 0;
}

int mtk_snand_irq_process(struct mtk_snand *snf)
{
	uint32_t sta, ien;

	if (!snf)
		return -EINVAL;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return 0;

	nfi_write32(snf, NFI_INTR_EN, 0);
	irq_completion_done(snf->pdev);

	return 1;
}

static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
{
	uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
	int i, mul = 1;

	/*
	 * If we're using the 1KB sector size, HW will automatically
	 * double the spare size. So we should only use half of the value.
	 */
	if (snf->nfi_soc->sector_size == 1024)
		mul = 2;

	spare_per_step /= mul;

	for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
		if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
			snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
			snf->spare_per_sector *= mul;
			return i;
		}
	}

	snand_log_nfi(snf->pdev,
		      "Page size %u+%u is not supported\n", snf->writesize,
		      snf->oobsize);

	return -1;
}

static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
{
	uint32_t spare_size_shift, pagesize_idx;
	uint32_t sector_size_512;
	int spare_size_idx;

	if (snf->nfi_soc->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (snf->writesize) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	spare_size_idx = mtk_snand_select_spare_per_sector(snf);
	if (unlikely(spare_size_idx < 0))
		return -ENOTSUPP;

	snf->raw_sector_size = snf->nfi_soc->sector_size +
			       snf->spare_per_sector;

	/* Setup page format */
	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
		    (spare_size_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	return 0;
}

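/*
 * Pick the most capable IO mode supported by both the controller and the
 * chip: fls() on the intersected capability mask yields the highest set
 * bit, whose opcode and dummy cycles come from the flash table.
 */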
static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
						   uint32_t snfi_caps,
						   uint8_t *opcode,
						   uint8_t *dummy,
						   const struct snand_io_cap *op_cap)
{
	uint32_t i, caps;

	caps = snfi_caps & op_cap->caps;

	i = fls(caps);
	if (i > 0) {
		*opcode = op_cap->opcodes[i - 1].opcode;
		if (dummy)
			*dummy = op_cap->opcodes[i - 1].dummy;
		return i - 1;
	}

	return __SNAND_IO_MAX;
}

static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
				       uint32_t snfi_caps,
				       const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
		[SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
		[SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
		[SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
		[SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
				      &snf->dummy_rfc, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for read from cache\n");
		return -ENOTSUPP;
	}

	snf->mode_rfc = rfc_modes[idx];

	if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
				      const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t pl_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = 0,
		[SNAND_IO_1_1_4] = 1,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
				      NULL, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for program load\n");
		return -ENOTSUPP;
	}

	snf->mode_pl = pl_modes[idx];

	if (idx == SNAND_IO_1_1_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_setup(struct mtk_snand *snf,
			   const struct snand_flash_info *snand_info)
{
	const struct snand_mem_org *memorg = &snand_info->memorg;
	uint32_t i, msg_size, snfi_caps;
	int ret;

	/* Calculate flash memory organization */
	snf->model = snand_info->model;
	snf->writesize = memorg->pagesize;
	snf->oobsize = memorg->sparesize;
	snf->erasesize = snf->writesize * memorg->pages_per_block;
	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
	snf->size = snf->die_size * memorg->ndies;
	snf->num_dies = memorg->ndies;

	snf->writesize_mask = snf->writesize - 1;
	snf->erasesize_mask = snf->erasesize - 1;
	snf->die_mask = snf->die_size - 1;

	snf->writesize_shift = ffs(snf->writesize) - 1;
	snf->erasesize_shift = ffs(snf->erasesize) - 1;
	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

	snf->select_die = snand_info->select_die;

	/* Determine opcodes for read from cache/program load */
	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
	if (snf->snfi_quad_spi)
		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
	if (ret)
		return ret;

	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
	if (ret)
		return ret;

	/* ECC and page format */
	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	ret = mtk_snand_pagefmt_setup(snf);
	if (ret)
		return ret;

	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
			    msg_size);
	if (ret)
		return ret;

	nfi_write16(snf, NFI_CNFG, 0);

	/* Tuning options */
	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
	nfi_write32(snf, SNF_DLY_CTL3,
		    (snf->nfi_soc->sample_delay << SFCK_SAM_DLY_S));

	/* Interrupts */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Initialization on all dies */
	for (i = 0; i < snf->num_dies; i++) {
		mtk_snand_select_die(snf, i);

		/* Disable On-Die ECC engine */
		ret = mtk_snand_ondie_ecc_control(snf, false);
		if (ret)
			return ret;

		/* Disable block protection */
		mtk_snand_unlock(snf);

		/* Enable/disable quad-spi */
		mtk_snand_qspi_control(snf, snf->quad_spi_op);
	}

	mtk_snand_select_die(snf, 0);

	return 0;
}

static int mtk_snand_id_probe(struct mtk_snand *snf,
			      const struct snand_flash_info **snand_info)
{
	uint8_t id[4], op[2];
	int ret;

	/* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
	op[0] = SNAND_CMD_READID;
	op[1] = 0;
	ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	/* Read SPI-NAND JEDEC ID, OP + ID */
	op[0] = SNAND_CMD_READID;
	ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	snand_log_chip(snf->pdev,
		       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
		       id[0], id[1], id[2], id[3]);

	return -EINVAL;
}

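/*
 * Typical call sequence (a sketch; the mtk_snand_platdata field values and
 * the dev handle are platform glue supplied by the integrating environment,
 * and the names used here are assumptions):
 *
 *	struct mtk_snand *snf;
 *	struct mtk_snand_platdata pdata = {
 *		.nfi_base = nfi_regs,	// assumed ioremapped NFI base
 *		.ecc_base = ecc_regs,	// assumed ioremapped ECC base
 *		.soc = SNAND_SOC_MT7986,
 *		.quad_spi = true,
 *	};
 *
 *	if (!mtk_snand_init(dev, &pdata, &snf)) {
 *		mtk_snand_read_page(snf, 0, buf, NULL, false);
 *		mtk_snand_cleanup(snf);
 *	}
 */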
int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
		   struct mtk_snand **psnf)
{
	const struct snand_flash_info *snand_info;
	uint32_t rawpage_size, sect_bf_size;
	struct mtk_snand tmpsnf, *snf;
	int ret;

	if (!pdata || !psnf)
		return -EINVAL;

	if (pdata->soc >= __SNAND_SOC_MAX) {
		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
			       pdata->soc);
		return -EINVAL;
	}

	/* Dummy instance only for initial reset and id probe */
	tmpsnf.nfi_base = pdata->nfi_base;
	tmpsnf.ecc_base = pdata->ecc_base;
	tmpsnf.soc = pdata->soc;
	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
	tmpsnf.pdev = dev;

	/* Switch to SNFI mode */
	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

	/* Reset SNFI & NFI */
	mtk_snand_mac_reset(&tmpsnf);
	mtk_nfi_reset(&tmpsnf);

	/* Reset SPI-NAND chip */
	ret = mtk_snand_chip_reset(&tmpsnf);
	if (ret) {
		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
		return ret;
	}

	/* Probe SPI-NAND flash by JEDEC ID */
	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
	if (ret)
		return ret;

	rawpage_size = snand_info->memorg.pagesize +
		       snand_info->memorg.sparesize;

	sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
		       sizeof(*snf->sect_bf);

	/* Allocate memory for instance and cache */
	snf = generic_mem_alloc(dev,
				sizeof(*snf) + rawpage_size + sect_bf_size);
	if (!snf) {
		snand_log_chip(dev, "Failed to allocate memory for instance\n");
		return -ENOMEM;
	}

	snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
	snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);

	/* Allocate memory for DMA buffer */
	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
	if (!snf->page_cache) {
		generic_mem_free(dev, snf);
		snand_log_chip(dev,
			       "Failed to allocate memory for DMA buffer\n");
		return -ENOMEM;
	}

	/* Fill up instance */
	snf->pdev = dev;
	snf->nfi_base = pdata->nfi_base;
	snf->ecc_base = pdata->ecc_base;
	snf->soc = pdata->soc;
	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
	snf->snfi_quad_spi = pdata->quad_spi;

	/* Initialize SNFI & ECC engine */
	ret = mtk_snand_setup(snf, snand_info);
	if (ret) {
		dma_mem_free(dev, snf->page_cache);
		generic_mem_free(dev, snf);
		return ret;
	}

	*psnf = snf;

	return 0;
}

int mtk_snand_cleanup(struct mtk_snand *snf)
{
	if (!snf)
		return 0;

	dma_mem_free(snf->pdev, snf->page_cache);
	generic_mem_free(snf->pdev, snf);

	return 0;
}
1947}