blob: df7ec8579503aff0b46d235060853836c4426da6 [file] [log] [blame]
/*
 * Copyright 2022 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
7#include <string.h>
8
9#include <common/debug.h>
10#include <drivers/io/io_block.h>
11#include "ifc.h"
12#include <lib/xlat_tables/xlat_tables_v2.h>
13#include <nxp_timer.h>
14
15/* Private structure for NAND driver data */
16static struct nand_info nand_drv_data;
17
18static int update_bbt(uint32_t idx, uint32_t blk, uint32_t *updated,
19 struct nand_info *nand);
20
21static int nand_wait(struct nand_info *nand)
22{
23 int timeout = 1;
24 uint32_t neesr;
25 unsigned long start_time;
26
27 start_time = get_timer_val(0);
28
29 while (get_timer_val(start_time) < NAND_TIMEOUT_MS) {
30 /* clear the OPC event */
31 neesr = read_reg(nand, NAND_EVTER_STAT);
32 if (neesr & NAND_EVTER_STAT_OPC_DN) {
33 write_reg(nand, NAND_EVTER_STAT, neesr);
34 timeout = 0;
35
36 /* check for other errors */
37 if (neesr & NAND_EVTER_STAT_FTOER) {
38 ERROR("%s NAND_EVTER_STAT_FTOER occurs\n",
39 __func__);
40 return -1;
41 } else if (neesr & NAND_EVTER_STAT_ECCER) {
42 ERROR("%s NAND_EVTER_STAT_ECCER occurs\n",
43 __func__);
44 return -1;
45 } else if (neesr & NAND_EVTER_STAT_DQSER) {
46 ERROR("%s NAND_EVTER_STAT_DQSER occurs\n",
47 __func__);
48 return -1;
49 }
50
51 break;
52 }
53 }
54
55 if (timeout) {
56 ERROR("%s ERROR_NAND_TIMEOUT occurs\n", __func__);
57 return -1;
58 }
59
60 return 0;
61}
62
63static uint32_t nand_get_port_size(struct nand_info *nand)
64{
65 uint32_t port_size = U(0);
66 uint32_t cs_reg;
67 uint32_t cur_cs;
68
69 cur_cs = U(0);
70 cs_reg = CSPR(cur_cs);
71 port_size = (read_reg(nand, cs_reg) & CSPR_PS) >> CSPR_PS_SHIFT;
72 switch (port_size) {
73 case CSPR_PS_8:
74 port_size = U(8);
75 break;
76 case CSPR_PS_16:
77 port_size = U(16);
78 break;
79 case CSPR_PS_32:
80 port_size = U(32);
81 break;
82 default:
83 port_size = U(8);
84 }
85
86 return port_size;
87}
88
89static uint32_t nand_get_page_size(struct nand_info *nand)
90{
91 uint32_t pg_size;
92 uint32_t cs_reg;
93 uint32_t cur_cs;
94
95 cur_cs = 0;
96 cs_reg = CSOR(cur_cs);
97 pg_size = read_reg(nand, cs_reg) & CSOR_NAND_PGS;
98 switch (pg_size) {
99 case CSOR_NAND_PGS_2K:
100 pg_size = U(2048);
101 break;
102 case CSOR_NAND_PGS_4K:
103 pg_size = U(4096);
104 break;
105 case CSOR_NAND_PGS_8K:
106 pg_size = U(8192);
107 break;
108 case CSOR_NAND_PGS_16K:
109 pg_size = U(16384);
110 break;
111 default:
112 pg_size = U(512);
113 }
114
115 return pg_size;
116}
117
118static uint32_t nand_get_pages_per_blk(struct nand_info *nand)
119{
120 uint32_t pages_per_blk;
121 uint32_t cs_reg;
122 uint32_t cur_cs;
123
124 cur_cs = 0;
125 cs_reg = CSOR(cur_cs);
126 pages_per_blk = (read_reg(nand, cs_reg) & CSOR_NAND_PB);
127 switch (pages_per_blk) {
128 case CSOR_NAND_PB_32:
129 pages_per_blk = U(32);
130 break;
131 case CSOR_NAND_PB_64:
132 pages_per_blk = U(64);
133 break;
134 case CSOR_NAND_PB_128:
135 pages_per_blk = U(128);
136 break;
137 case CSOR_NAND_PB_256:
138 pages_per_blk = U(256);
139 break;
140 case CSOR_NAND_PB_512:
141 pages_per_blk = U(512);
142 break;
143 case CSOR_NAND_PB_1024:
144 pages_per_blk = U(1024);
145 break;
146 case CSOR_NAND_PB_2048:
147 pages_per_blk = U(2048);
148 break;
149 default:
150 pages_per_blk = U(0);
151 }
152
153 return pages_per_blk;
154}
155
156static uint32_t get_page_index_width(uint32_t ppb)
157{
158 switch (ppb) {
159 case CSOR_NAND_PPB_32:
160 return U(5);
161 case CSOR_NAND_PPB_64:
162 return U(6);
163 case CSOR_NAND_PPB_128:
164 return U(7);
165 case CSOR_NAND_PPB_256:
166 return U(8);
167 case CSOR_NAND_PPB_512:
168 return U(9);
169 case CSOR_NAND_PPB_1024:
170 return U(10);
171 case CSOR_NAND_PPB_2048:
172 return U(11);
173 default:
174 return U(5);
175 }
176}
177
/*
 * Populate 'nand' with the device geometry programmed into the IFC
 * chip-select registers and reset the bad-block bookkeeping state.
 */
static void nand_get_params(struct nand_info *nand)
{
	nand->port_size = nand_get_port_size(nand);

	nand->page_size = nand_get_page_size(nand);

	/*
	 * Set bad-block marker location for Large Page / Small Page:
	 * Small Page : 8 Bit : 0x5
	 * Small Page : 16 Bit : 0xa
	 * Large Page : 8 /16 Bit : 0x0
	 */
	nand->bad_marker_loc = (nand->page_size == 512) ?
		((nand->port_size == 8) ? 0x5 : 0xa) : 0;

	/* check for the device is ONFI compliant or not */
	nand->onfi_dev_flag =
		(read_reg(nand, NAND_EVTER_STAT) & NAND_EVTER_STAT_BBI_SRCH_SEL)
		? 1 : 0;

	/* NAND blocks searched so far by the incremental bad-block search */
	nand->bbs = 0;

	/* pages per Block */
	nand->ppb = nand_get_pages_per_blk(nand);

	/* Blk size */
	nand->blk_size = nand->page_size * nand->ppb;

	/* row-address bits used for the page index within a block */
	nand->pi_width = get_page_index_width(nand->ppb);

	/* bad block table init: EMPTY_VAL marks unused slots */
	nand->lgb = 0;
	nand->bbt_max = 0;
	nand->bzero_good = 0;
	memset(nand->bbt, EMPTY_VAL, BBT_SIZE * sizeof(nand->bbt[0]));
}
216
217static int nand_init(struct nand_info *nand)
218{
219 uint32_t ncfgr = 0;
220
221 /* Get nand Parameters from IFC */
222 nand_get_params(nand);
223
224 /* Clear all errors */
225 write_reg(nand, NAND_EVTER_STAT, U(0xffffffff));
226
227 /*
228 * Disable autoboot in NCFGR. Mapping will change from
229 * physical to logical for SRAM buffer
230 */
231 ncfgr = read_reg(nand, NCFGR);
232 write_reg(nand, NCFGR, (ncfgr & ~NCFGR_BOOT));
233
234 return 0;
235}
236
/*
 * Issue one IFC NAND read transaction and copy the result out of the
 * controller's SRAM buffer into 'data'.
 *
 * row_add   - page address: (physical block << pi_width) | page index
 * col_add   - byte offset within the page
 * byte_cnt  - bytes to read; 0 requests a full page
 * main_spare- 0 reads the main area, 1 the spare (OOB) area
 *
 * Returns 0 on success, or the negative result of nand_wait() on
 * controller error/timeout.
 */
static int nand_read_data(
		uintptr_t ifc_region_addr,
		uint32_t row_add,
		uint32_t col_add,
		uint32_t byte_cnt,
		uint8_t *data,
		uint32_t main_spare,
		struct nand_info *nand)
{
	uint32_t page_size_add_bits = U(0);
	uint32_t page_add_in_actual, page_add;
	uintptr_t sram_addr_calc;
	int ret;
	uint32_t col_val;

	/* Programming MS bit to read from spare area.*/
	col_val = (main_spare << NAND_COL_MS_SHIFT) | col_add;

	write_reg(nand, NAND_BC, byte_cnt);

	write_reg(nand, ROW0, row_add);
	write_reg(nand, COL0, col_val);

	/* Program FCR for small Page */
	if (nand->page_size == U(512)) {
		/*
		 * Small-page devices select the page half by command:
		 * READ0 for the first 256 bytes, READ1 for the second,
		 * READOOB for the spare area.
		 */
		if (byte_cnt == 0 ||
		    (byte_cnt != 0 && main_spare == 0 && col_add <= 255)) {
			write_reg(nand, NAND_FCR0,
				  (NAND_CMD_READ0 << FCR_CMD0_SHIFT));
		} else if (main_spare == 0) {
			write_reg(nand, NAND_FCR0,
				  (NAND_CMD_READ1 << FCR_CMD0_SHIFT));
		} else {
			write_reg(nand, NAND_FCR0,
				  (NAND_CMD_READOOB << FCR_CMD0_SHIFT));
		}

	} else {
		/* Program FCR for Large Page */
		write_reg(nand, NAND_FCR0, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
			  (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
	}
	/* Program the flash instruction sequence (one extra CMD1 op for LP) */
	if (nand->page_size == U(512)) {
		write_reg(nand, NAND_FIR0, ((FIR_OP_CW0 << FIR_OP0_SHIFT) |
					    (FIR_OP_CA0 << FIR_OP1_SHIFT) |
					    (FIR_OP_RA0 << FIR_OP2_SHIFT) |
					    (FIR_OP_BTRD << FIR_OP3_SHIFT) |
					    (FIR_OP_NOP << FIR_OP4_SHIFT)));
		write_reg(nand, NAND_FIR1, U(0x00000000));
	} else {
		write_reg(nand, NAND_FIR0, ((FIR_OP_CW0 << FIR_OP0_SHIFT) |
					    (FIR_OP_CA0 << FIR_OP1_SHIFT) |
					    (FIR_OP_RA0 << FIR_OP2_SHIFT) |
					    (FIR_OP_CMD1 << FIR_OP3_SHIFT) |
					    (FIR_OP_BTRD << FIR_OP4_SHIFT)));

		write_reg(nand, NAND_FIR1, (FIR_OP_NOP << FIR_OP5_SHIFT));
	}
	/* Kick off the programmed sequence and wait for completion */
	write_reg(nand, NANDSEQ_STRT, NAND_SEQ_STRT_FIR_STRT);

	ret = nand_wait(nand);
	if (ret != 0)
		return ret;

	/* calculate page_size_add_bits i.e bits
	 * in sram address corresponding to area
	 * within a page for sram
	 */
	if (nand->page_size == U(512))
		page_size_add_bits = U(10);
	else if (nand->page_size == U(2048))
		page_size_add_bits = U(12);
	else if (nand->page_size == U(4096))
		page_size_add_bits = U(13);
	else if (nand->page_size == U(8192))
		page_size_add_bits = U(14);
	else if (nand->page_size == U(16384))
		page_size_add_bits = U(15);

	page_add = row_add;

	/* SRAM buffer wraps: keep only the low 16 address bits */
	page_add_in_actual = (page_add << page_size_add_bits) & U(0x0000FFFF);

	if (byte_cnt == 0)
		col_add = U(0);

	/* Calculate SRAM address for main and spare area */
	if (main_spare == 0)
		sram_addr_calc = ifc_region_addr | page_add_in_actual | col_add;
	else
		sram_addr_calc = ifc_region_addr | page_add_in_actual |
			(col_add + nand->page_size);

	/* Depending Byte_count copy full page or partial page from SRAM */
	if (byte_cnt == 0)
		memcpy(data, (void *)sram_addr_calc,
		       nand->page_size);
	else
		memcpy(data, (void *)sram_addr_calc, byte_cnt);

	return 0;
}
339
340static int nand_read(struct nand_info *nand, int32_t src_addr,
341 uintptr_t dst, uint32_t size)
342{
343 uint32_t log_blk = U(0);
344 uint32_t pg_no = U(0);
345 uint32_t col_off = U(0);
346 uint32_t row_off = U(0);
347 uint32_t byte_cnt = U(0);
348 uint32_t read_cnt = U(0);
349 uint32_t i = U(0);
350 uint32_t updated = U(0);
351
352 int ret = 0;
353 uint8_t *out = (uint8_t *)dst;
354
355 uint32_t pblk;
356
357 /* loop till size */
358 while (size) {
359 log_blk = (src_addr / nand->blk_size);
360 pg_no = ((src_addr - (log_blk * nand->blk_size)) /
361 nand->page_size);
362 pblk = log_blk;
363
364 // iterate the bbt to find the block
365 for (i = 0; i <= nand->bbt_max; i++) {
366 if (nand->bbt[i] == EMPTY_VAL_CHECK) {
367 ret = update_bbt(i, pblk, &updated, nand);
368
369 if (ret != 0)
370 return ret;
371 /*
372 * if table not updated and we reached
373 * end of table
374 */
375 if (!updated)
376 break;
377 }
378
379 if (pblk < nand->bbt[i])
380 break;
381 else if (pblk >= nand->bbt[i])
382 pblk++;
383 }
384
385 col_off = (src_addr % nand->page_size);
386 if (col_off) {
387 if ((col_off + size) < nand->page_size)
388 byte_cnt = size;
389 else
390 byte_cnt = nand->page_size - col_off;
391
392 row_off = (pblk << nand->pi_width) | pg_no;
393
394 ret = nand_read_data(
395 nand->ifc_region_addr,
396 row_off,
397 col_off,
398 byte_cnt, out, MAIN, nand);
399
400 if (ret != 0)
401 return ret;
402 } else {
403 /*
404 * fullpage/Partial Page
405 * if byte_cnt = 0 full page
406 * else partial page
407 */
408 if (size < nand->page_size) {
409 byte_cnt = size;
410 read_cnt = size;
411 } else {
412 byte_cnt = nand->page_size;
413 read_cnt = 0;
414 }
415 row_off = (pblk << nand->pi_width) | pg_no;
416
417 ret = nand_read_data(
418 nand->ifc_region_addr,
419 row_off,
420 0,
421 read_cnt, out, MAIN, nand);
422
423 if (ret != 0) {
424 ERROR("Error from nand-read_data %d\n", ret);
425 return ret;
426 }
427 }
428 src_addr += byte_cnt;
429 out += byte_cnt;
430 size -= byte_cnt;
431 }
432 return 0;
433}
434
435static int isgoodblock(uint32_t blk, uint32_t *gb, struct nand_info *nand)
436{
437 uint8_t buf[2];
438 int ret;
439 uint32_t row_add;
440
441 *gb = 0;
442
443 /* read Page 0 of blk */
444 ret = nand_read_data(
445 nand->ifc_region_addr,
446 blk << nand->pi_width,
447 nand->bad_marker_loc,
448 0x2, buf, 1, nand);
449
450 if (ret != 0)
451 return ret;
452
453 /* For ONFI devices check Page 0 and Last page of block for
454 * Bad Marker and for NON-ONFI Page 0 and 1 for Bad Marker
455 */
456 row_add = (blk << nand->pi_width);
457 if (nand->port_size == 8) {
458 /* port size is 8 Bit */
459 /* check if page 0 has 0xff */
460 if (buf[0] == 0xff) {
461 /* check page 1 */
462 if (nand->onfi_dev_flag)
463 ret = nand_read_data(
464 nand->ifc_region_addr,
465 row_add | (nand->ppb - 1),
466 nand->bad_marker_loc,
467 0x2, buf, SPARE, nand);
468 else
469 ret = nand_read_data(
470 nand->ifc_region_addr,
471 row_add | 1,
472 nand->bad_marker_loc,
473 0x2, buf, SPARE, nand);
474
475 if (ret != 0)
476 return ret;
477
478 if (buf[0] == 0xff)
479 *gb = GOOD_BLK;
480 else
481 *gb = BAD_BLK;
482 } else {
483 /* no, so it is bad blk */
484 *gb = BAD_BLK;
485 }
486 } else {
487 /* Port size 16-Bit */
488 /* check if page 0 has 0xffff */
489 if ((buf[0] == 0xff) &&
490 (buf[1] == 0xff)) {
491 /* check page 1 for 0xffff */
492 if (nand->onfi_dev_flag) {
493 ret = nand_read_data(
494 nand->ifc_region_addr,
495 row_add | (nand->ppb - 1),
496 nand->bad_marker_loc,
497 0x2, buf, SPARE, nand);
498 } else {
499 ret = nand_read_data(
500 nand->ifc_region_addr,
501 row_add | 1,
502 nand->bad_marker_loc,
503 0x2, buf, SPARE, nand);
504 }
505
506 if (ret != 0)
507 return ret;
508
509 if ((buf[0] == 0xff) &&
510 (buf[1] == 0xff)) {
511 *gb = GOOD_BLK;
512 } else {
513 *gb = BAD_BLK;
514 }
515 } else {
516 /* no, so it is bad blk */
517 *gb = BAD_BLK;
518 }
519 }
520 return 0;
521}
522
/*
 * Extend the bad block table (bbt) starting at slot 'idx' while
 * scanning forward for the good physical block corresponding to
 * 'blk'.
 *
 * nand->lgb tracks the last good block already verified, so blocks at
 * or below it are never re-scanned. *updated is set to 1 when at
 * least one new bad block was appended to the table.
 *
 * Returns 0 on success, negative on read error or when the table is
 * full.
 */
static int update_bbt(uint32_t idx, uint32_t blk,
		      uint32_t *updated, struct nand_info *nand)
{
	uint32_t sblk;
	uint32_t lgb;
	int ret;

	/* Block 0 was already verified good: nothing to scan */
	if (nand->bzero_good && blk == 0)
		return 0;

	/* special case for lgb == 0 */
	/* if blk <= lgb return */
	if (nand->lgb != 0 && blk <= nand->lgb)
		return 0;

	*updated = 0;

	/* if blk is more than lgb, iterate from lgb till a good block
	 * is found for blk
	 */

	if (nand->lgb < blk)
		sblk = nand->lgb;
	else
		/* this is when lgb = 0 */
		sblk = blk;


	lgb = nand->lgb;

	/* loop from blk to find a good block */
	while (1) {
		while (lgb <= sblk) {
			uint32_t gb = 0;

			ret = isgoodblock(lgb, &gb, nand);
			if (ret != 0)
				return ret;

			/* special case block 0 is good then set this flag */
			if (lgb == 0 && gb == GOOD_BLK)
				nand->bzero_good = 1;

			if (gb == BAD_BLK) {
				if (idx >= BBT_SIZE) {
					ERROR("NAND BBT Table full\n");
					return -1;
				}
				*updated = 1;
				nand->bbt[idx] = lgb;
				idx++;
				/* each bad block pushes the target one out */
				blk++;
				sblk++;
				if (idx > nand->bbt_max)
					nand->bbt_max = idx;
			}
			lgb++;
		}
		/* the access block found */
		if (sblk == blk) {
			/* when good block found update lgb */
			nand->lgb = blk;
			break;
		}
		sblk++;
	}

	return 0;
}
592
593static size_t ifc_nand_read(int lba, uintptr_t buf, size_t size)
594{
595 int ret;
596 uint32_t page_size;
597 uint32_t src_addr;
598 struct nand_info *nand = &nand_drv_data;
599
600 page_size = nand_get_page_size(nand);
601 src_addr = lba * page_size;
602 ret = nand_read(nand, src_addr, buf, size);
603 return ret ? 0 : size;
604}
605
/* Block-device specification handed to the io_block layer */
static struct io_block_dev_spec ifc_nand_spec = {
	.buffer = {
		/* Filled in by ifc_nand_init() */
		.offset = 0,
		.length = 0,
	},
	.ops = {
		.read = ifc_nand_read,
	},
	/*
	 * Default block size assumed as 2K
	 * Would be updated based on actual size
	 */
	.block_size = UL(2048),
};
620
621int ifc_nand_init(uintptr_t *block_dev_spec,
622 uintptr_t ifc_region_addr,
623 uintptr_t ifc_register_addr,
624 size_t ifc_sram_size,
625 uintptr_t ifc_nand_blk_offset,
626 size_t ifc_nand_blk_size)
627{
628 struct nand_info *nand = NULL;
629 int ret;
630
631 nand = &nand_drv_data;
632 memset(nand, 0, sizeof(struct nand_info));
633
634 nand->ifc_region_addr = ifc_region_addr;
635 nand->ifc_register_addr = ifc_register_addr;
636
637 VERBOSE("nand_init\n");
638 ret = nand_init(nand);
639 if (ret) {
640 ERROR("nand init failed\n");
641 return ret;
642 }
643
644 ifc_nand_spec.buffer.offset = ifc_nand_blk_offset;
645 ifc_nand_spec.buffer.length = ifc_nand_blk_size;
646
647 ifc_nand_spec.block_size = nand_get_page_size(nand);
648
649 VERBOSE("Page size is %ld\n", ifc_nand_spec.block_size);
650
651 *block_dev_spec = (uintptr_t)&ifc_nand_spec;
652
653 /* Adding NAND SRAM< Buffer in XLAT Table */
654 mmap_add_region(ifc_region_addr, ifc_region_addr,
655 ifc_sram_size, MT_DEVICE | MT_RW);
656
657 return 0;
658}