// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"

/* NFI registers */
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM GENMASK(28, 24)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define AHB_BUS_BUSY BIT(1)
#define BUS_BUSY BIT(0)
#define NFI_MASTERSTA_MASK_7981 (AHB_BUS_BUSY | BUS_BUSY)
#define NFI_MASTERSTA_MASK_7986 (AHB_BUS_BUSY | BUS_BUSY)

/* SNFI registers */
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define LATCH_LAT_S 8
#define LATCH_LAT GENMASK(9, 8)
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

#define SNFI_POLL_INTERVAL 1000000

static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const uint8_t mt7981_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const uint8_t mt7986_spare_sizes[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64,
	67, 74
};

static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
	[SNAND_SOC_MT7622] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = false,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7629] = {
		.sector_size = 512,
		.max_sectors = 8,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 32,
		.bbm_swap = true,
		.empty_page_check = false,
		.mastersta_mask = NFI_MASTERSTA_MASK_7622,
		.spare_sizes = mt7622_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7981] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7981,
		.spare_sizes = mt7981_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7981_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
	[SNAND_SOC_MT7986] = {
		.sector_size = 1024,
		.max_sectors = 16,
		.fdm_size = 8,
		.fdm_ecc_size = 1,
		.fifo_size = 64,
		.bbm_swap = true,
		.empty_page_check = true,
		.mastersta_mask = NFI_MASTERSTA_MASK_7986,
		.spare_sizes = mt7986_spare_sizes,
		.num_spare_size = ARRAY_SIZE(mt7986_spare_sizes),
		.latch_lat = 0,
		.sample_delay = 40
	},
};

static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
			       uint32_t val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
			       uint16_t val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
			     uint32_t set)
{
	uint32_t val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		val |= ((uint32_t)*data++) << (8 * (i % es));

		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}

static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}

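/*
 * Note on the two helpers above: the GPRAM is only word-addressable, so
 * nfi_write_data()/nfi_read_data() pack and unpack byte streams into 32-bit
 * little-endian register words. Illustrative example (not part of the
 * driver): writing the 2-byte sequence { 0x9f, 0x00 } at reg 0x800 results
 * in a single nfi_write32(snf, 0x800, 0x0000009f); in general, byte i of
 * the stream lands in bits [8*(i%4)+7 : 8*(i%4)] of the word at (reg+i) & ~3.
 */
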
static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
	uint8_t tmp = *bm1;
	*bm1 = *bm2;
	*bm2 = tmp;
}

static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
		      snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[snf->writesize]);
}

static void mtk_snand_bm_swap(struct mtk_snand *snf)
{
	uint32_t buf_bbm_pos, fdm_bbm_pos;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	buf_bbm_pos = snf->writesize -
		      (snf->ecc_steps - 1) * snf->spare_per_sector;
	fdm_bbm_pos = snf->writesize +
		      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos],
		   &snf->page_cache[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->nfi_soc->sector_size;
	fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
		       snf->nfi_soc->sector_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
		return;

	fdm_bbm_pos1 = snf->writesize;
	fdm_bbm_pos2 = snf->writesize +
		       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
	do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
		   &snf->page_cache[fdm_bbm_pos2]);
}

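/*
 * Layout note for the swap helpers above (derived from the offsets they
 * use; illustrative only). A raw page in page_cache is stored
 * sector-interleaved:
 *
 *   [ data | FDM | ECC ] [ data | FDM | ECC ] ...   (ecc_steps times)
 *
 * where raw_sector_size = sector_size + spare_per_sector. A plain SPI read
 * of the chip would see the factory bad block marker at byte offset
 * writesize (the first spare byte), which in this interleaved layout falls
 * inside the last sector. The *_raw helpers therefore swap that byte with
 * the last sector's first FDM byte; the non-raw helpers perform the
 * equivalent swap on the decoded data+FDM buffer.
 */
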
static int mtk_nfi_reset(struct mtk_snand *snf)
{
	uint32_t val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "NFI master is still busy after reset\n");
		return ret;
	}

	ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
				  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				  SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	uint32_t val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
		    (10 << CS_DESELECT_CYC_S) |
		    (snf->nfi_soc->latch_lat << LATCH_LAT_S));

	return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
				 uint32_t inlen)
{
	int ret;
	uint32_t val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				  !(val & WIP), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_snfi(snf->pdev,
			       "Timed out waiting for WIP cleared\n");
	}

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
		     uint8_t *in, uint32_t inlen)
{
	int ret;

	if (outlen + inlen > SNF_GPRAM_SIZE)
		return -EINVAL;

	mtk_snand_mac_reset(snf);

	nfi_write_data(snf, SNF_GPRAM, out, outlen);

	ret = mtk_snand_mac_trigger(snf, outlen, inlen);
	if (ret)
		return ret;

	if (!inlen)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);

	return 0;
}

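/*
 * Illustrative use of mtk_snand_mac_io() (a sketch mirroring the
 * get/set-feature helpers below): a transaction first writes the opcode and
 * address bytes into SNF_GPRAM, then clocks out `outlen` bytes and clocks
 * `inlen` bytes back into GPRAM. Since both directions share the
 * 0xa0-byte GPRAM, outlen + inlen must not exceed SNF_GPRAM_SIZE.
 *
 *	uint8_t op[2] = { SNAND_CMD_GET_FEATURE, SNAND_FEATURE_STATUS_ADDR };
 *	uint8_t status;
 *
 *	ret = mtk_snand_mac_io(snf, op, sizeof(op), &status, 1);
 */
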
static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
{
	uint8_t op[2], val;
	int ret;

	op[0] = SNAND_CMD_GET_FEATURE;
	op[1] = (uint8_t)addr;

	ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
	if (ret)
		return ret;

	return val;
}

int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
{
	uint8_t op[3];

	op[0] = SNAND_CMD_SET_FEATURE;
	op[1] = (uint8_t)addr;
	op[2] = (uint8_t)val;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
{
	int val;
	mtk_snand_time_t time_start, tmo;

	time_start = timer_get_ticks();
	tmo = timer_time_to_tick(wait_us);

	do {
		val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
		if (!(val & SNAND_STATUS_OIP))
			return val & (SNAND_STATUS_ERASE_FAIL |
				      SNAND_STATUS_PROGRAM_FAIL);
	} while (!timer_is_timeout(time_start, tmo));

	return -ETIMEDOUT;
}

int mtk_snand_chip_reset(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_RESET;
	int ret;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0)
		return ret;

	return 0;
}

static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
				    uint8_t set)
{
	int val, newval;
	int ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	newval = (val & (~clr)) | set;

	if (newval == val)
		return 0;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
				    (uint8_t)newval);
	if (ret) {
		snand_log_chip(snf->pdev,
			       "Failed to set configuration feature\n");
		return ret;
	}

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
	if (val < 0) {
		snand_log_chip(snf->pdev,
			       "Failed to get configuration feature\n");
		return val;
	}

	if (newval != val)
		return -ENOTSUPP;

	return 0;
}

static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable)
		ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
	else
		ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
{
	int ret;

	if (enable) {
		ret = mtk_snand_config_feature(snf, 0,
					       SNAND_FEATURE_QUAD_ENABLE);
	} else {
		ret = mtk_snand_config_feature(snf,
					       SNAND_FEATURE_QUAD_ENABLE, 0);
	}

	if (ret) {
		snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
			       enable ? "enable" : "disable");
	}

	return ret;
}

static int mtk_snand_unlock(struct mtk_snand *snf)
{
	int ret;

	ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
	if (ret) {
		snand_log_chip(snf->pdev, "Failed to set protection feature\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_write_enable(struct mtk_snand *snf)
{
	uint8_t op = SNAND_CMD_WRITE_ENABLE;
	int ret, val;

	ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
	if (ret)
		return ret;

	val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
	if (val < 0)
		return val;

	if (val & SNAND_STATUS_WEL)
		return 0;

	snand_log_chip(snf->pdev, "Failed to send write-enable command\n");

	return -ENOTSUPP;
}

static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
	if (!snf->select_die)
		return 0;

	return snf->select_die(snf, dieidx);
}

static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
					     uint64_t addr)
{
	uint32_t dieidx;

	if (!snf->select_die)
		return addr;

	dieidx = addr >> snf->die_shift;

	mtk_snand_select_die(snf, dieidx);

	return addr & snf->die_mask;
}

static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
					    uint32_t page)
{
	uint32_t pages_per_block;

	pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);

	if (page & pages_per_block)
		return 1 << (snf->writesize_shift + 1);

	return 0;
}

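/*
 * Worked example for the plane-address helper above (illustrative, assuming
 * a 2 KiB page / 128 KiB block device): pages_per_block = 64, so bit 6 of
 * the page number selects odd/even blocks. On dual-plane chips, odd blocks
 * must be accessed with the plane bit set in the column address, which here
 * is bit (writesize_shift + 1) = bit 12, i.e. 1 << 12 = 0x1000.
 */
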
static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
{
	uint8_t op[4];

	op[0] = cmd;
	op[1] = (page >> 16) & 0xff;
	op[2] = (page >> 8) & 0xff;
	op[3] = page & 0xff;

	return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
	uint32_t vall, valm;
	uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->nfi_soc->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->nfi_soc->fdm_size;
	}
}

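/*
 * FDM register layout used by mtk_snand_read_fdm()/mtk_snand_write_fdm()
 * (derived from the code): each sector i has a pair of 32-bit registers,
 * NFI_FDML(i) holding FDM bytes 0-3 and NFI_FDMM(i) holding bytes 4-7, both
 * little-endian. With fdm_size = 8 a full register pair is transferred per
 * sector.
 */
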
static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
				     uint32_t sect, uint8_t *oob)
{
	uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint32_t coladdr, raw_offs, offs;
	uint8_t op[4];

	if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
		snand_log_snfi(snf->pdev,
			       "ECC parity size does not fit the GPRAM\n");
		return -ENOTSUPP;
	}

	raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
		   snf->nfi_soc->fdm_size;
	offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;

	/* Column address with plane bit */
	coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);

	op[0] = SNAND_CMD_READ_FROM_CACHE;
	op[1] = (coladdr >> 8) & 0xff;
	op[2] = coladdr & 0xff;
	op[3] = 0;

	return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
}

static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
{
	uint8_t *oob = snf->page_cache + snf->writesize;
	int i, rc, ret = 0, max_bitflips = 0;

	for (i = 0; i < snf->ecc_steps; i++) {
		if (snf->sect_bf[i] >= 0) {
			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];
			continue;
		}

		rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
		if (rc)
			return rc;

		rc = mtk_ecc_fixup_empty_sector(snf, i);
		if (rc < 0) {
			ret = -EBADMSG;

			snand_log_ecc(snf->pdev,
				      "Uncorrectable bitflips in page %u sect %u\n",
				      page, i);
		} else if (rc) {
			snf->sect_bf[i] = rc;

			if (snf->sect_bf[i] > max_bitflips)
				max_bitflips = snf->sect_bf[i];

			snand_log_ecc(snf->pdev,
				      "%u bitflip%s corrected in page %u sect %u\n",
				      rc, rc > 1 ? "s" : "", page, i);
		} else {
			snf->sect_bf[i] = 0;
		}
	}

	return ret ? ret : max_bitflips;
}

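/*
 * Overview of the custom read sequence implemented below (summarizing the
 * code, not adding new behavior):
 *
 *   1. Program SNF_RD_CTL2/3 with the read-from-cache opcode, dummy cycles
 *      and column address, and select the SPI I/O mode in SNF_MISC_CTL.
 *   2. Configure the NFI for a custom DMA read (optionally with HW ECC and
 *      auto-format) and point NFI_STRADDR at page_cache.
 *   3. Trigger NFI_CMD_DUMMY_READ, start the DMA via CON_BRD/STR_DATA, then
 *      wait for CUS_READ_DONE, the sector counter and bus idle.
 *   4. For ECC reads, collect the FDM bytes and per-sector bitflip results.
 */
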
static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Command and dummy cycles */
	nfi_write32(snf, SNF_RD_CTL2,
		    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
		    (snf->opcode_rfc << DATA_READ_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_RD_CTL3, coladdr);

	/* Set read mode */
	mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
		  mode | DATARD_CUSTOM_EN |
		  (snf->nfi_soc->latch_lat << LATCH_LAT_S));

	/* Set bytes to read */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI read prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA read */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map from device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_decoder_start(snf);

	/* Prepare for custom read interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	/* Start DMA read */
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_READ_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for reading from cache\n");
		goto cleanup;
	}

	/* Wait for BUS_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				  BUS_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup;
	}

	/* Wait for bus becoming idle */
	ret = read32_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				  !(val & snf->nfi_soc->mastersta_mask),
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for bus becoming idle\n");
		goto cleanup;
	}

	if (!raw) {
		ret = mtk_ecc_wait_decoder_done(snf);
		if (ret)
			goto cleanup;

		mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

		mtk_ecc_check_decode_error(snf);
		mtk_snand_ecc_decoder_stop(snf);

		ret = mtk_snand_check_ecc_result(snf, page);
	}

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, false);

	/* Stop read */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN | LATCH_LAT, 0);

	return ret;
}

static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;

	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			memcpy(eccptr, raw_sector, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
				  void *buf, void *oob, bool raw, bool format)
{
	uint64_t die_addr;
	uint32_t page, dly_ctrl3;
	int ret, retry_cnt = 0;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	dly_ctrl3 = nfi_read32(snf, SNF_DLY_CTL3);

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev, "Read to cache command timed out\n");
		return ret;
	}

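	/*
	 * Read retry (sample-delay calibration): if the transfer below
	 * reports uncorrectable bitflips, re-read the cache with the
	 * SNF_DLY_CTL3 sampling delay swept as retry_cnt * 2 for up to 16
	 * attempts, restoring the original delay if no attempt succeeds.
	 */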
retry:
	ret = mtk_snand_read_cache(snf, page, raw);
	if (ret < 0 && ret != -EBADMSG)
		return ret;

	if (ret == -EBADMSG && retry_cnt < 16) {
		nfi_write32(snf, SNF_DLY_CTL3, retry_cnt * 2);
		retry_cnt++;
		goto retry;
	}

	if (retry_cnt) {
		if (ret == -EBADMSG) {
			nfi_write32(snf, SNF_DLY_CTL3, dly_ctrl3);
			snand_log_chip(snf->pdev,
				       "NFI calibration failed. Original sample delay: 0x%x\n",
				       dly_ctrl3);
		} else {
			snand_log_chip(snf->pdev,
				       "NFI calibration passed. New sample delay: 0x%x\n",
				       nfi_read32(snf, SNF_DLY_CTL3));
		}
	}

	if (raw) {
		if (format) {
			mtk_snand_bm_swap_raw(snf);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_from_raw_page(snf, buf, oob);
		} else {
			if (buf)
				memcpy(buf, snf->page_cache, snf->writesize);

			if (oob) {
				memset(oob, 0xff, snf->oobsize);
				memcpy(oob, snf->page_cache + snf->writesize,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		mtk_snand_bm_swap(snf);
		mtk_snand_fdm_bm_swap(snf);

		if (buf)
			memcpy(buf, snf->page_cache, snf->writesize);

		if (oob) {
			memset(oob, 0xff, snf->oobsize);
			memcpy(oob, snf->page_cache + snf->writesize,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}
	}

	return ret;
}

int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
			void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
	uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
	const uint8_t *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->ecc_steps; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
						<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
						<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
				  bool raw)
{
	uint32_t coladdr, rwbytes, mode, len, val;
	uintptr_t dma_addr;
	int ret;

	/* Column address with plane bit */
	coladdr = mtk_snand_get_plane_address(snf, page);

	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	/* Write FDM registers if necessary */
	if (!raw)
		mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

	/* Command */
	nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

	/* Column address */
	nfi_write32(snf, SNF_PG_CTL2, coladdr);

	/* Set write mode */
	mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

	/* Set bytes to write */
	rwbytes = snf->ecc_steps * snf->raw_sector_size;
	nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
		    rwbytes);

	/* NFI write prepare */
	mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
	nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

	nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

	/* Prepare for DMA write */
	len = snf->writesize + snf->oobsize;
	ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA map to device failed with %d\n", ret);
		return ret;
	}

	nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

	if (!raw)
		mtk_snand_ecc_encoder_start(snf);

	/* Prepare for custom write interrupt */
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	irq_completion_init(snf->pdev);

	/* Trigger NFI into custom mode */
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	/* Start DMA write */
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	/* Wait for operation finished */
	ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
				  CUS_PG_DONE, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "DMA timed out for program load\n");
		goto cleanup;
	}

	/* Wait for NFI_SEC_CNTR returning expected value */
	ret = read32_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
				  NFI_SEC_CNTR(val) >= snf->ecc_steps,
				  0, SNFI_POLL_INTERVAL);
	if (ret) {
		snand_log_nfi(snf->pdev,
			      "Timed out waiting for NFI_SEC_CNTR\n");
		goto cleanup;
	}

	if (!raw)
		mtk_snand_ecc_encoder_stop(snf);

cleanup:
	/* DMA cleanup */
	dma_mem_unmap(snf->pdev, dma_addr, len, true);

	/* Stop write */
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Disable interrupt */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

static void mtk_snand_to_raw_page(struct mtk_snand *snf,
				  const void *buf, const void *oob,
				  bool empty_ecc)
{
	uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
	const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
	const uint8_t *bufptr = buf, *oobptr = oob;
	uint8_t *raw_sector;

	memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
	for (i = 0; i < snf->ecc_steps; i++) {
		raw_sector = snf->page_cache + i * snf->raw_sector_size;

		if (buf) {
			memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
			bufptr += snf->nfi_soc->sector_size;
		}

		raw_sector += snf->nfi_soc->sector_size;

		if (oob) {
			memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
			oobptr += snf->nfi_soc->fdm_size;
			raw_sector += snf->nfi_soc->fdm_size;

			if (empty_ecc)
				memset(raw_sector, 0xff, ecc_bytes);
			else
				memcpy(raw_sector, eccptr, ecc_bytes);
			eccptr += ecc_bytes;
		}
	}
}

static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
				    const void *oob)
{
	const uint8_t *p = buf;
	uint32_t i, j;

	if (buf) {
		for (i = 0; i < snf->writesize; i++) {
			if (p[i] != 0xff)
				return false;
		}
	}

	if (oob) {
		for (j = 0; j < snf->ecc_steps; j++) {
			p = oob + j * snf->nfi_soc->fdm_size;

			for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
				if (p[i] != 0xff)
					return false;
			}
		}
	}

	return true;
}

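/*
 * Rationale for the empty-page check used below (an inference from the
 * code, not from vendor documentation): a fully-0xff page is written raw
 * with 0xff ECC parity so that it still looks erased when read back.
 * Otherwise the ECC engine would store real parity bytes for the blank
 * data, and the read path's empty-sector fixup would no longer be able to
 * treat the page as erased.
 */
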
static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
				   const void *buf, const void *oob,
				   bool raw, bool format)
{
	uint64_t die_addr;
	bool empty_ecc = false;
	uint32_t page;
	int ret;

	die_addr = mtk_snand_select_die_address(snf, addr);
	page = die_addr >> snf->writesize_shift;

	if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
		/*
		 * If the data in the page to be ecc-ed is full 0xff,
		 * change to raw write mode
		 */
		raw = true;
		format = true;

		/* fill ecc parity code region with 0xff */
		empty_ecc = true;
	}

	if (raw) {
		if (format) {
			mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
			mtk_snand_fdm_bm_swap_raw(snf);
			mtk_snand_bm_swap_raw(snf);
		} else {
			memset(snf->page_cache, 0xff,
			       snf->writesize + snf->oobsize);

			if (buf)
				memcpy(snf->page_cache, buf, snf->writesize);

			if (oob) {
				memcpy(snf->page_cache + snf->writesize, oob,
				       snf->ecc_steps * snf->spare_per_sector);
			}
		}
	} else {
		memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
		if (buf)
			memcpy(snf->page_cache, buf, snf->writesize);

		if (oob) {
			memcpy(snf->page_cache + snf->writesize, oob,
			       snf->ecc_steps * snf->nfi_soc->fdm_size);
		}

		mtk_snand_fdm_bm_swap(snf);
		mtk_snand_bm_swap(snf);
	}

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_program_load(snf, page, raw);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Page program command timed out on page %u\n",
			       page);
		return ret;
	}

	if (ret & SNAND_STATUS_PROGRAM_FAIL) {
		snand_log_chip(snf->pdev,
			       "Page program failed on page %u\n", page);
		return -EIO;
	}

	return 0;
}

int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
			 const void *oob, bool raw)
{
	if (!snf || (!buf && !oob))
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
}

int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
	uint64_t die_addr;
	uint32_t page, block;
	int ret;

	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	die_addr = mtk_snand_select_die_address(snf, addr);
	block = die_addr >> snf->erasesize_shift;
	page = block << (snf->erasesize_shift - snf->writesize_shift);

	ret = mtk_snand_write_enable(snf);
	if (ret)
		return ret;

	ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
	if (ret)
		return ret;

	ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
	if (ret < 0) {
		snand_log_chip(snf->pdev,
			       "Block erase command timed out on block %u\n",
			       block);
		return ret;
	}

	if (ret & SNAND_STATUS_ERASE_FAIL) {
		snand_log_chip(snf->pdev,
			       "Block erase failed on block %u\n", block);
		return -EIO;
	}

	return 0;
}

static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     false);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	int ret;

	ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
				     true);
	if (ret && ret != -EBADMSG)
		return ret;

	return snf->buf_cache[0] != 0xff;
}

int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_isbad_std(snf, addr);

	return mtk_snand_block_isbad_mtk(snf, addr);
}

static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
{
	/* Standard BBM position */
	memset(snf->buf_cache, 0xff, snf->oobsize);
	snf->buf_cache[0] = 0;

	return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
				       false);
}

static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
	/* Write the whole page with zeros */
	memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);

	return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
				       snf->buf_cache + snf->writesize, true,
				       true);
}

int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
{
	if (!snf)
		return -EINVAL;

	if (addr >= snf->size)
		return -EINVAL;

	addr &= ~snf->erasesize_mask;

	if (snf->nfi_soc->bbm_swap)
		return mtk_snand_block_markbad_std(snf, addr);

	return mtk_snand_block_markbad_mtk(snf, addr);
}

int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
		       const uint8_t *oobbuf, size_t ooblen)
{
	size_t len = ooblen, sect_fdm_len;
	const uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
			   size_t ooblen, const uint8_t *oobraw)
{
	size_t len = ooblen, sect_fdm_len;
	uint8_t *oob = oobbuf;
	uint32_t step = 0;

	if (!snf || !oobraw || !oob)
		return -EINVAL;

	while (len && step < snf->ecc_steps) {
		sect_fdm_len = snf->nfi_soc->fdm_size - 1;
		if (sect_fdm_len > len)
			sect_fdm_len = len;

		memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
		       sect_fdm_len);

		len -= sect_fdm_len;
		oob += sect_fdm_len;
		step++;
	}

	return len;
}

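/*
 * OOB mapping example for the two helpers above (illustrative): byte 0 of
 * each sector's FDM region is reserved for the bad block marker, so only
 * fdm_size - 1 bytes per ECC step carry user OOB data. With fdm_size = 8
 * and 4 ECC steps, at most 4 * 7 = 28 user OOB bytes fit; both helpers
 * return the number of bytes that did not fit.
 */
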
int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				 void *buf, void *oob, size_t ooblen,
				 size_t *actualooblen, bool raw)
{
	int ret, oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_read_page(snf, addr, buf, NULL, raw);

	ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
	if (ret && ret != -EBADMSG) {
		if (actualooblen)
			*actualooblen = 0;
		return ret;
	}

	oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return ret;
}

int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
				  const void *buf, const void *oob,
				  size_t ooblen, size_t *actualooblen, bool raw)
{
	int oobremain;

	if (!snf)
		return -EINVAL;

	if (!oob)
		return mtk_snand_write_page(snf, addr, buf, NULL, raw);

	memset(snf->buf_cache, 0xff, snf->oobsize);
	oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
	if (actualooblen)
		*actualooblen = ooblen - oobremain;

	return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
}

int mtk_snand_get_chip_info(struct mtk_snand *snf,
			    struct mtk_snand_chip_info *info)
{
	if (!snf || !info)
		return -EINVAL;

	info->model = snf->model;
	info->chipsize = snf->size;
	info->blocksize = snf->erasesize;
	info->pagesize = snf->writesize;
	info->sparesize = snf->oobsize;
	info->spare_per_sector = snf->spare_per_sector;
	info->fdm_size = snf->nfi_soc->fdm_size;
	info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
	info->num_sectors = snf->ecc_steps;
	info->sector_size = snf->nfi_soc->sector_size;
	info->ecc_strength = snf->ecc_strength;
	info->ecc_bytes = snf->ecc_bytes;

	return 0;
}

int mtk_snand_irq_process(struct mtk_snand *snf)
{
	uint32_t sta, ien;

	if (!snf)
		return -EINVAL;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return 0;

	nfi_write32(snf, NFI_INTR_EN, 0);
	irq_completion_done(snf->pdev);

	return 1;
}

static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
{
	uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
	int i, mul = 1;

	/*
	 * If we're using the 1KB sector size, HW will automatically
	 * double the spare size. So we should only use half of the value.
	 */
	if (snf->nfi_soc->sector_size == 1024)
		mul = 2;

	spare_per_step /= mul;

	for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
		if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
			snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
			snf->spare_per_sector *= mul;
			return i;
		}
	}

	snand_log_nfi(snf->pdev,
		      "Page size %u+%u is not supported\n", snf->writesize,
		      snf->oobsize);

	return -1;
}

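/*
 * Worked example for the spare-size selection above (illustrative, assuming
 * a 2048+64 page on a 1024-byte-sector SoC): ecc_steps = 2, so
 * spare_per_step = 32, halved to 16 because the HW doubles the spare size
 * for 1 KiB sectors; the largest table entry <= 16 is 16, so
 * spare_per_sector becomes 16 * 2 = 32 bytes.
 */
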
static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
{
	int spare_size_idx;
	uint32_t spare_size_shift, pagesize_idx;
	uint32_t sector_size_512;

	if (snf->nfi_soc->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (snf->writesize) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->nfi_soc->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	spare_size_idx = mtk_snand_select_spare_per_sector(snf);
	if (unlikely(spare_size_idx < 0))
		return -ENOTSUPP;

	snf->raw_sector_size = snf->nfi_soc->sector_size +
			       snf->spare_per_sector;

	/* Setup page format */
	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
		    (spare_size_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	return 0;
}

static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
						   uint32_t snfi_caps,
						   uint8_t *opcode,
						   uint8_t *dummy,
						   const struct snand_io_cap *op_cap)
{
	uint32_t i, caps;

	caps = snfi_caps & op_cap->caps;

	i = fls(caps);
	if (i > 0) {
		*opcode = op_cap->opcodes[i - 1].opcode;
		if (dummy)
			*dummy = op_cap->opcodes[i - 1].dummy;
		return i - 1;
	}

	return __SNAND_IO_MAX;
}

static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
				       uint32_t snfi_caps,
				       const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
		[SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
		[SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
		[SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
		[SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
				      &snf->dummy_rfc, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for read from cache\n");
		return -ENOTSUPP;
	}

	snf->mode_rfc = rfc_modes[idx];

	if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
				      const struct snand_io_cap *op_cap)
{
	enum snand_flash_io idx;

	static const uint8_t pl_modes[__SNAND_IO_MAX] = {
		[SNAND_IO_1_1_1] = 0,
		[SNAND_IO_1_1_4] = 1,
	};

	idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
				      NULL, op_cap);
	if (idx >= __SNAND_IO_MAX) {
		snand_log_snfi(snf->pdev,
			       "No capable opcode for program load\n");
		return -ENOTSUPP;
	}

	snf->mode_pl = pl_modes[idx];

	if (idx == SNAND_IO_1_1_4)
		snf->quad_spi_op = true;

	return 0;
}

static int mtk_snand_setup(struct mtk_snand *snf,
			   const struct snand_flash_info *snand_info)
{
	const struct snand_mem_org *memorg = &snand_info->memorg;
	uint32_t i, msg_size, snfi_caps;
	int ret;

	/* Calculate flash memory organization */
	snf->model = snand_info->model;
	snf->writesize = memorg->pagesize;
	snf->oobsize = memorg->sparesize;
	snf->erasesize = snf->writesize * memorg->pages_per_block;
	snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
	snf->size = snf->die_size * memorg->ndies;
	snf->num_dies = memorg->ndies;

	snf->writesize_mask = snf->writesize - 1;
	snf->erasesize_mask = snf->erasesize - 1;
	snf->die_mask = snf->die_size - 1;

	snf->writesize_shift = ffs(snf->writesize) - 1;
	snf->erasesize_shift = ffs(snf->erasesize) - 1;
	snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

	snf->select_die = snand_info->select_die;

	/* Determine opcodes for read from cache/program load */
	snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
	if (snf->snfi_quad_spi)
		snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

	ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
	if (ret)
		return ret;

	ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
	if (ret)
		return ret;

	/* ECC and page format */
	snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
	if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
		snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
			      snf->writesize);
		return -ENOTSUPP;
	}

	ret = mtk_snand_pagefmt_setup(snf);
	if (ret)
		return ret;

	msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
	ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
			    snf->spare_per_sector - snf->nfi_soc->fdm_size,
			    msg_size);
	if (ret)
		return ret;

	nfi_write16(snf, NFI_CNFG, 0);

	/* Tuning options */
	nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
	nfi_write32(snf, SNF_DLY_CTL3,
		    (snf->nfi_soc->sample_delay << SFCK_SAM_DLY_S));

	/* Interrupts */
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	/* Clear SNF done flag */
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	/* Initialization on all dies */
	for (i = 0; i < snf->num_dies; i++) {
		mtk_snand_select_die(snf, i);

		/* Disable On-Die ECC engine */
		ret = mtk_snand_ondie_ecc_control(snf, false);
		if (ret)
			return ret;

		/* Disable block protection */
		mtk_snand_unlock(snf);

		/* Enable/disable quad-spi */
		mtk_snand_qspi_control(snf, snf->quad_spi_op);
	}

	mtk_snand_select_die(snf, 0);

	return 0;
}

static int mtk_snand_id_probe(struct mtk_snand *snf,
			      const struct snand_flash_info **snand_info)
{
	uint8_t id[4], op[2];
	int ret;

	/* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
	op[0] = SNAND_CMD_READID;
	op[1] = 0;
	ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	/* Read SPI-NAND JEDEC ID, OP + ID */
	op[0] = SNAND_CMD_READID;
	ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
	if (ret)
		return ret;

	*snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
	if (*snand_info)
		return 0;

	snand_log_chip(snf->pdev,
		       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
		       id[0], id[1], id[2], id[3]);

	return -EINVAL;
}

int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
		   struct mtk_snand **psnf)
{
	const struct snand_flash_info *snand_info;
	uint32_t rawpage_size, sect_bf_size;
	struct mtk_snand tmpsnf, *snf;
	int ret;

	if (!pdata || !psnf)
		return -EINVAL;

	if (pdata->soc >= __SNAND_SOC_MAX) {
		snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
			       pdata->soc);
		return -EINVAL;
	}

	/* Dummy instance only for initial reset and id probe */
	tmpsnf.nfi_base = pdata->nfi_base;
	tmpsnf.ecc_base = pdata->ecc_base;
	tmpsnf.soc = pdata->soc;
	tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
	tmpsnf.pdev = dev;

	/* Switch to SNFI mode */
	writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

	/* Reset SNFI & NFI */
	mtk_snand_mac_reset(&tmpsnf);
	mtk_nfi_reset(&tmpsnf);

	/* Reset SPI-NAND chip */
	ret = mtk_snand_chip_reset(&tmpsnf);
	if (ret) {
		snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
		return ret;
	}

	/* Probe SPI-NAND flash by JEDEC ID */
	ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
	if (ret)
		return ret;

	rawpage_size = snand_info->memorg.pagesize +
		       snand_info->memorg.sparesize;

	sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
		       sizeof(*snf->sect_bf);

	/* Allocate memory for instance and cache */
	snf = generic_mem_alloc(dev,
				sizeof(*snf) + rawpage_size + sect_bf_size);
	if (!snf) {
		snand_log_chip(dev, "Failed to allocate memory for instance\n");
		return -ENOMEM;
	}

	snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
	snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);

	/* Allocate memory for DMA buffer */
	snf->page_cache = dma_mem_alloc(dev, rawpage_size);
	if (!snf->page_cache) {
		generic_mem_free(dev, snf);
		snand_log_chip(dev,
			       "Failed to allocate memory for DMA buffer\n");
		return -ENOMEM;
	}

	/* Fill up instance */
	snf->pdev = dev;
	snf->nfi_base = pdata->nfi_base;
	snf->ecc_base = pdata->ecc_base;
	snf->soc = pdata->soc;
	snf->nfi_soc = &mtk_snand_socs[pdata->soc];
	snf->snfi_quad_spi = pdata->quad_spi;

	/* Initialize SNFI & ECC engine */
	ret = mtk_snand_setup(snf, snand_info);
	if (ret) {
		dma_mem_free(dev, snf->page_cache);
		generic_mem_free(dev, snf);
		return ret;
	}

	*psnf = snf;

	return 0;
}

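/*
 * Typical call sequence (an illustrative sketch; the mtk_snand_platdata
 * fields follow the definitions in mtk-snand-def.h, and the base addresses,
 * SoC ID and buffer here are placeholders, not tested values):
 *
 *	struct mtk_snand_platdata pdata = {
 *		.nfi_base = nfi_regs,
 *		.ecc_base = ecc_regs,
 *		.soc = SNAND_SOC_MT7986,
 *		.quad_spi = true,
 *	};
 *	struct mtk_snand *snf;
 *
 *	if (!mtk_snand_init(dev, &pdata, &snf)) {
 *		mtk_snand_read_page(snf, 0, buf, NULL, false);
 *		mtk_snand_cleanup(snf);
 *	}
 */
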
int mtk_snand_cleanup(struct mtk_snand *snf)
{
	if (!snf)
		return 0;

	dma_mem_free(snf->pdev, snf->page_cache);
	generic_mem_free(snf->pdev, snf);

	return 0;
}