blob: 8bbef3fcb79bd6595a2c7dcd97efe1b74b911212 [file] [log] [blame]
developer8d16ac22021-05-26 15:32:12 +08001// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2021 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 */
7
8#include "nmbm-private.h"
9
10#include "nmbm-debug.h"
11
12#define NMBM_VER_MAJOR 1
13#define NMBM_VER_MINOR 0
14#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
15 NMBM_VER_MINOR)
16
17#define NMBM_ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))
18
19/*****************************************************************************/
20/* Logging related functions */
21/*****************************************************************************/
22
23/*
24 * nmbm_log_lower - Print log using OS specific routine
25 * @nld: NMBM lower device structure
26 * @level: log level
27 * @fmt: format string
28 */
29static void nmbm_log_lower(struct nmbm_lower_device *nld,
30 enum nmbm_log_category level, const char *fmt, ...)
31{
32 va_list ap;
33
34 if (!nld->logprint)
35 return;
36
37 va_start(ap, fmt);
38 nld->logprint(nld->arg, level, fmt, ap);
39 va_end(ap);
40}
41
42/*
43 * nmbm_log - Print log using OS specific routine
44 * @ni: NMBM instance structure
45 * @level: log level
46 * @fmt: format string
47 */
48static void nmbm_log(struct nmbm_instance *ni, enum nmbm_log_category level,
49 const char *fmt, ...)
50{
51 va_list ap;
52
53 if (!ni)
54 return;
55
56 if (!ni->lower.logprint || level < ni->log_display_level)
57 return;
58
59 va_start(ap, fmt);
60 ni->lower.logprint(ni->lower.arg, level, fmt, ap);
61 va_end(ap);
62}
63
64/*
65 * nmbm_set_log_level - Set log display level
66 * @ni: NMBM instance structure
67 * @level: log display level
68 */
69enum nmbm_log_category nmbm_set_log_level(struct nmbm_instance *ni,
70 enum nmbm_log_category level)
71{
72 enum nmbm_log_category old;
73
74 if (!ni)
75 return __NMBM_LOG_MAX;
76
77 old = ni->log_display_level;
78 ni->log_display_level = level;
79 return old;
80}
81
/*
 * nlog_table_creation - Print log of table creation event
 * @ni: NMBM instance structure
 * @main_table: whether the table is main info table
 * @start_ba: start block address of the table
 * @end_ba: block address after the end of the table
 */
static void nlog_table_creation(struct nmbm_instance *ni, bool main_table,
				uint32_t start_ba, uint32_t end_ba)
{
	const char *name = main_table ? "Main" : "Backup";
	uint32_t last_ba = end_ba - 1;

	/* Single-block tables get the short form of the message */
	if (last_ba == start_ba)
		nlog_info(ni, "%s info table has been written to block %u\n",
			  name, start_ba);
	else
		nlog_info(ni, "%s info table has been written to block %u-%u\n",
			  name, start_ba, last_ba);

	nmbm_mark_block_color_info_table(ni, start_ba, last_ba);
}
101
/*
 * nlog_table_update - Print log of table update event
 * @ni: NMBM instance structure
 * @main_table: whether the table is main info table
 * @start_ba: start block address of the table
 * @end_ba: block address after the end of the table
 */
static void nlog_table_update(struct nmbm_instance *ni, bool main_table,
			      uint32_t start_ba, uint32_t end_ba)
{
	const char *name = main_table ? "Main" : "Backup";
	uint32_t last_ba = end_ba - 1;

	/* Single-block tables get the short form of the message */
	if (last_ba == start_ba)
		nlog_debug(ni, "%s info table has been updated in block %u\n",
			   name, start_ba);
	else
		nlog_debug(ni, "%s info table has been updated in block %u-%u\n",
			   name, start_ba, last_ba);

	nmbm_mark_block_color_info_table(ni, start_ba, last_ba);
}
121
/*
 * nlog_table_found - Print log of table found event
 * @ni: NMBM instance structure
 * @first_table: whether the table is first found info table
 * @write_count: write count of the info table
 * @start_ba: start block address of the table
 * @end_ba: block address after the end of the table
 */
static void nlog_table_found(struct nmbm_instance *ni, bool first_table,
			     uint32_t write_count, uint32_t start_ba,
			     uint32_t end_ba)
{
	const char *which = first_table ? "First" : "Second";
	uint32_t last_ba = end_ba - 1;

	/* Single-block tables get the short form of the message */
	if (last_ba == start_ba)
		nlog_info(ni, "%s info table with writecount %u found in block %u\n",
			  which, write_count, start_ba);
	else
		nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n",
			  which, write_count, start_ba, last_ba);

	nmbm_mark_block_color_info_table(ni, start_ba, last_ba);
}
145
146/*****************************************************************************/
147/* Address conversion functions */
148/*****************************************************************************/
149
150/*
151 * addr2ba - Convert a linear address to block address
152 * @ni: NMBM instance structure
153 * @addr: Linear address
154 */
155static uint32_t addr2ba(struct nmbm_instance *ni, uint64_t addr)
156{
157 return addr >> ni->erasesize_shift;
158}
159
160/*
161 * ba2addr - Convert a block address to linear address
162 * @ni: NMBM instance structure
163 * @ba: Block address
164 */
165static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba)
166{
167 return (uint64_t)ba << ni->erasesize_shift;
168}
169/*
170 * size2blk - Get minimum required blocks for storing specific size of data
171 * @ni: NMBM instance structure
172 * @size: size for storing
173 */
174static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size)
175{
176 return (size + ni->lower.erasesize - 1) >> ni->erasesize_shift;
177}
178
179/*****************************************************************************/
180/* High level NAND chip APIs */
181/*****************************************************************************/
182
183/*
184 * nmbm_reset_chip - Reset NAND device
185 * @nld: Lower NAND chip structure
186 */
187static void nmbm_reset_chip(struct nmbm_instance *ni)
188{
189 if (ni->lower.reset_chip)
190 ni->lower.reset_chip(ni->lower.arg);
191}
192
193/*
194 * nmbm_read_phys_page - Read page with retry
195 * @ni: NMBM instance structure
196 * @addr: linear address where the data will be read from
197 * @data: the main data to be read
198 * @oob: the oob data to be read
199 * @mode: mode for processing oob data
200 *
201 * Read a page for at most NMBM_TRY_COUNT times.
202 *
203 * Return 0 for success, positive value for ecc error,
204 * negative value for other errors
205 */
206static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr,
207 void *data, void *oob, enum nmbm_oob_mode mode)
208{
209 int tries, ret;
210
211 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
212 ret = ni->lower.read_page(ni->lower.arg, addr, data, oob, mode);
213 if (!ret)
214 return 0;
215
216 nmbm_reset_chip(ni);
217 }
218
219 if (ret < 0)
220 nlog_err(ni, "Page read failed at address 0x%08llx\n", addr);
221
222 return ret;
223}
224
225/*
226 * nmbm_write_phys_page - Write page with retry
227 * @ni: NMBM instance structure
228 * @addr: linear address where the data will be written to
229 * @data: the main data to be written
230 * @oob: the oob data to be written
231 * @mode: mode for processing oob data
232 *
233 * Write a page for at most NMBM_TRY_COUNT times.
234 */
235static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
236 const void *data, const void *oob,
237 enum nmbm_oob_mode mode)
238{
239 int tries, ret;
240
241 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
242 ret = ni->lower.write_page(ni->lower.arg, addr, data, oob, mode);
243 if (!ret)
244 return true;
245
246 nmbm_reset_chip(ni);
247 }
248
249 nlog_err(ni, "Page write failed at address 0x%08llx\n", addr);
250
251 return false;
252}
253
254/*
255 * nmbm_erase_phys_block - Erase a block with retry
256 * @ni: NMBM instance structure
257 * @addr: Linear address
258 *
259 * Erase a block for at most NMBM_TRY_COUNT times.
260 */
261static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr)
262{
263 int tries, ret;
264
265 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
266 ret = ni->lower.erase_block(ni->lower.arg, addr);
267 if (!ret)
268 return true;
269
270 nmbm_reset_chip(ni);
271 }
272
273 nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr);
274
275 return false;
276}
277
278/*
279 * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
280 * @ni: NMBM instance structure
281 * @ba: block address
282 */
283static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba)
284{
285 uint64_t addr = ba2addr(ni, ba);
286 int ret;
287
288 if (ni->lower.is_bad_block)
289 return ni->lower.is_bad_block(ni->lower.arg, addr);
290
291 /* Treat ECC error as read success */
292 ret = nmbm_read_phys_page(ni, addr, NULL,
293 ni->page_cache + ni->lower.writesize,
developerd8912b32021-06-16 17:22:36 +0800294 NMBM_MODE_RAW);
developer8d16ac22021-05-26 15:32:12 +0800295 if (ret < 0)
296 return true;
297
298 return ni->page_cache[ni->lower.writesize] != 0xff;
299}
300
301/*
302 * nmbm_mark_phys_bad_block - Mark a block bad
303 * @ni: NMBM instance structure
304 * @addr: Linear address
305 */
306static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba)
307{
308 uint64_t addr = ba2addr(ni, ba);
309 enum nmbm_log_category level;
310 uint32_t off;
311
312 nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr);
313
314 if (ni->lower.mark_bad_block)
315 return ni->lower.mark_bad_block(ni->lower.arg, addr);
316
317 /* Whole page set to 0x00 */
318 memset(ni->page_cache, 0, ni->rawpage_size);
319
320 /* Write to all pages within this block, disable all errors */
321 level = nmbm_set_log_level(ni, __NMBM_LOG_MAX);
322
323 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
324 nmbm_write_phys_page(ni, addr + off, ni->page_cache,
325 ni->page_cache + ni->lower.writesize,
326 NMBM_MODE_RAW);
327 }
328
329 nmbm_set_log_level(ni, level);
330
331 return 0;
332}
333
334/*****************************************************************************/
335/* NMBM related functions */
336/*****************************************************************************/
337
338/*
339 * nmbm_check_header - Check whether a NMBM structure is valid
340 * @data: pointer to a NMBM structure with a NMBM header at beginning
341 * @size: Size of the buffer pointed by @header
342 *
343 * The size of the NMBM structure may be larger than NMBM header,
344 * e.g. block mapping table and block state table.
345 */
346static bool nmbm_check_header(const void *data, uint32_t size)
347{
348 const struct nmbm_header *header = data;
349 struct nmbm_header nhdr;
350 uint32_t new_checksum;
351
352 /*
353 * Make sure expected structure size is equal or smaller than
354 * buffer size.
355 */
356 if (header->size > size)
357 return false;
358
359 memcpy(&nhdr, data, sizeof(nhdr));
360
361 nhdr.checksum = 0;
362 new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr));
363 if (header->size > sizeof(nhdr))
364 new_checksum = nmbm_crc32(new_checksum,
365 (const uint8_t *)data + sizeof(nhdr),
366 header->size - sizeof(nhdr));
367
368 if (header->checksum != new_checksum)
369 return false;
370
371 return true;
372}
373
374/*
375 * nmbm_update_checksum - Update checksum of a NMBM structure
376 * @header: pointer to a NMBM structure with a NMBM header at beginning
377 *
378 * The size of the NMBM structure must be specified by @header->size
379 */
380static void nmbm_update_checksum(struct nmbm_header *header)
381{
382 header->checksum = 0;
383 header->checksum = nmbm_crc32(0, header, header->size);
384}
385
386/*
387 * nmbm_get_spare_block_count - Calculate number of blocks should be reserved
388 * @block_count: number of blocks of data
389 *
390 * Calculate number of blocks should be reserved for data
391 */
392static uint32_t nmbm_get_spare_block_count(uint32_t block_count)
393{
394 uint32_t val;
395
396 val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV;
397 val *= NMBM_SPARE_BLOCK_MULTI;
398
399 if (val < NMBM_SPARE_BLOCK_MIN)
400 val = NMBM_SPARE_BLOCK_MIN;
401
402 return val;
403}
404
405/*
406 * nmbm_get_block_state_raw - Get state of a block from raw block state table
407 * @block_state: pointer to raw block state table (bitmap)
408 * @ba: block address
409 */
410static uint32_t nmbm_get_block_state_raw(nmbm_bitmap_t *block_state,
411 uint32_t ba)
412{
413 uint32_t unit, shift;
414
415 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
416 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
417
418 return (block_state[unit] >> shift) & BLOCK_ST_MASK;
419}
420
/*
 * nmbm_get_block_state - Get state of a block from block state table
 * @ni: NMBM instance structure
 * @ba: block address
 *
 * Convenience wrapper around nmbm_get_block_state_raw() using the
 * instance's own block state bitmap.
 */
static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba)
{
	return nmbm_get_block_state_raw(ni->block_state, ba);
}
430
431/*
432 * nmbm_set_block_state - Set state of a block to block state table
433 * @ni: NMBM instance structure
434 * @ba: block address
435 * @state: block state
436 *
437 * Set state of a block. If the block state changed, ni->block_state_changed
438 * will be increased.
439 */
440static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba,
441 uint32_t state)
442{
443 uint32_t unit, shift, orig;
444 nmbm_bitmap_t uv;
445
446 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
447 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
448
449 orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK;
450 state &= BLOCK_ST_MASK;
451
452 uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift));
453 uv |= state << shift;
454 ni->block_state[unit] = uv;
455
456 if (state == BLOCK_ST_BAD)
457 nmbm_mark_block_color_bad(ni, ba);
458
459 if (orig != state) {
460 ni->block_state_changed++;
461 return true;
462 }
463
464 return false;
465}
466
467/*
468 * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
469 * @ni: NMBM instance structure
470 * @ba: start physical block address
471 * @nba: return physical block address after walk
472 * @count: number of good blocks to be skipped
473 * @limit: highest block address allowed for walking
474 *
475 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
476 * return the next good block address.
477 *
478 * If no enough good blocks counted while @limit reached, false will be returned.
479 *
480 * If @count == 0, nearest good block address will be returned.
481 * @limit is not counted in walking.
482 */
483static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba,
484 uint32_t *nba, uint32_t count,
485 uint32_t limit)
486{
487 int32_t nblock = count;
488
489 if (limit >= ni->block_count)
490 limit = ni->block_count - 1;
491
492 while (ba < limit) {
493 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
494 nblock--;
495
496 if (nblock < 0) {
497 *nba = ba;
498 return true;
499 }
500
501 ba++;
502 }
503
504 return false;
505}
506
507/*
508 * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
509 * @ni: NMBM instance structure
510 * @ba: start physical block address
511 * @nba: return physical block address after walk
512 * @count: number of good blocks to be skipped
513 * @limit: lowest block address allowed for walking
514 *
515 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
516 * return the next good block address.
517 *
518 * If no enough good blocks counted while @limit reached, false will be returned.
519 *
520 * If @count == 0, nearest good block address will be returned.
521 * @limit is not counted in walking.
522 */
523static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba,
524 uint32_t *nba, uint32_t count, uint32_t limit)
525{
526 int32_t nblock = count;
527
528 if (limit >= ni->block_count)
529 limit = ni->block_count - 1;
530
531 while (ba > limit) {
532 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
533 nblock--;
534
535 if (nblock < 0) {
536 *nba = ba;
537 return true;
538 }
539
540 ba--;
541 }
542
543 return false;
544}
545
/*
 * nmbm_block_walk - Skip specified number of good blocks from curr. block addr
 * @ni: NMBM instance structure
 * @ascending: whether to walk ascending
 * @ba: start physical block address
 * @nba: return physical block address after walk
 * @count: number of good blocks to be skipped
 * @limit: highest/lowest block address allowed for walking
 *
 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
 * return the next good block address.
 *
 * If no enough good blocks counted while @limit reached, false will be returned.
 *
 * If @count == 0, nearest good block address will be returned.
 * @limit can be set to negative if no limit required.
 * @limit is not counted in walking.
 */
static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending,
			    uint32_t ba, uint32_t *nba, int32_t count,
			    int32_t limit)
{
	if (!ascending)
		return nmbm_block_walk_desc(ni, ba, nba, count, limit);

	return nmbm_block_walk_asc(ni, ba, nba, count, limit);
}
573
574/*
575 * nmbm_scan_badblocks - Scan and record all bad blocks
576 * @ni: NMBM instance structure
577 *
578 * Scan the entire lower NAND chip and record all bad blocks in to block state
579 * table.
580 */
581static void nmbm_scan_badblocks(struct nmbm_instance *ni)
582{
583 uint32_t ba;
584
585 for (ba = 0; ba < ni->block_count; ba++) {
586 if (nmbm_check_bad_phys_block(ni, ba)) {
587 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
588 nlog_info(ni, "Bad block %u [0x%08llx]\n", ba,
589 ba2addr(ni, ba));
590 }
591 }
592}
593
594/*
595 * nmbm_build_mapping_table - Build initial block mapping table
596 * @ni: NMBM instance structure
597 *
598 * The initial mapping table will be compatible with the stratage of
599 * factory production.
600 */
601static void nmbm_build_mapping_table(struct nmbm_instance *ni)
602{
603 uint32_t pb, lb;
604
605 for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) {
606 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
607 continue;
608
609 /* Always map to the next good block */
610 ni->block_mapping[lb++] = pb;
611 }
612
613 ni->data_block_count = lb;
614
615 /* Unusable/Management blocks */
616 for (pb = lb; pb < ni->block_count; pb++)
617 ni->block_mapping[pb] = -1;
618}
619
620/*
621 * nmbm_erase_range - Erase a range of blocks
622 * @ni: NMBM instance structure
623 * @ba: block address where the erasure will start
624 * @limit: top block address allowed for erasure
625 *
626 * Erase blocks within the specific range. Newly-found bad blocks will be
627 * marked.
628 *
629 * @limit is not counted into the allowed erasure address.
630 */
631static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba,
632 uint32_t limit)
633{
634 bool success;
635
636 while (ba < limit) {
637 WATCHDOG_RESET();
638
639 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
640 goto next_block;
641
developer4f9017d2021-06-16 17:18:47 +0800642 /* Insurance to detect unexpected bad block marked by user */
643 if (nmbm_check_bad_phys_block(ni, ba)) {
644 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
645 goto next_block;
646 }
647
developer8d16ac22021-05-26 15:32:12 +0800648 success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
649 if (success)
650 goto next_block;
651
652 nmbm_mark_phys_bad_block(ni, ba);
653 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
654
655 next_block:
656 ba++;
657 }
658}
659
/*
 * nmbm_write_repeated_data - Write critical data to a block with retry
 * @ni: NMBM instance structure
 * @ba: block address where the data will be written to
 * @data: the data to be written
 * @size: size of the data
 *
 * Write data to every page of the block. Success only if all pages within
 * this block have been successfully written.
 *
 * Make sure data size is not bigger than one page.
 *
 * This function will write and verify every page for at most
 * NMBM_TRY_COUNT times.
 */
static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba,
				     const void *data, uint32_t size)
{
	uint64_t addr, off;
	bool success;
	int ret;

	/* Data must fit within a single page */
	if (size > ni->lower.writesize)
		return false;

	addr = ba2addr(ni, ba);

	for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
		WATCHDOG_RESET();

		/*
		 * Prepare page data. fill 0xff to unused region.
		 * This must be redone on every pass: the verify read below
		 * overwrites page_cache with the data read back from flash.
		 */
		memcpy(ni->page_cache, data, size);
		memset(ni->page_cache + size, 0xff, ni->rawpage_size - size);

		success = nmbm_write_phys_page(ni, addr + off, ni->page_cache,
					       NULL, NMBM_MODE_PLACE_OOB);
		if (!success)
			return false;

		/* Verify the data just written. ECC error indicates failure */
		ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
					  NMBM_MODE_PLACE_OOB);
		if (ret)
			return false;

		if (memcmp(ni->page_cache, data, size))
			return false;
	}

	return true;
}
711
712/*
713 * nmbm_write_signature - Write signature to NAND chip
714 * @ni: NMBM instance structure
715 * @limit: top block address allowed for writing
716 * @signature: the signature to be written
717 * @signature_ba: the actual block address where signature is written to
718 *
719 * Write signature within a specific range, from chip bottom to limit.
720 * At most one block will be written.
721 *
722 * @limit is not counted into the allowed write address.
723 */
724static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit,
725 const struct nmbm_signature *signature,
726 uint32_t *signature_ba)
727{
728 uint32_t ba = ni->block_count - 1;
729 bool success;
730
731 while (ba > limit) {
732 WATCHDOG_RESET();
733
734 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
735 goto next_block;
developer4f9017d2021-06-16 17:18:47 +0800736
737 /* Insurance to detect unexpected bad block marked by user */
738 if (nmbm_check_bad_phys_block(ni, ba)) {
739 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
740 goto next_block;
741 }
developer8d16ac22021-05-26 15:32:12 +0800742
743 success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
744 if (!success)
745 goto skip_bad_block;
746
747 success = nmbm_write_repeated_data(ni, ba, signature,
748 sizeof(*signature));
749 if (success) {
750 *signature_ba = ba;
751 return true;
752 }
753
754 skip_bad_block:
755 nmbm_mark_phys_bad_block(ni, ba);
756 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
757
758 next_block:
759 ba--;
760 };
761
762 return false;
763}
764
765/*
766 * nmbn_read_data - Read data
767 * @ni: NMBM instance structure
768 * @addr: linear address where the data will be read from
769 * @data: the data to be read
770 * @size: the size of data
771 *
772 * Read data range.
773 * Every page will be tried for at most NMBM_TRY_COUNT times.
774 *
775 * Return 0 for success, positive value for ecc error,
776 * negative value for other errors
777 */
778static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data,
779 uint32_t size)
780{
781 uint64_t off = addr;
782 uint8_t *ptr = data;
783 uint32_t sizeremain = size, chunksize, leading;
784 int ret;
785
786 while (sizeremain) {
787 WATCHDOG_RESET();
788
789 leading = off & ni->writesize_mask;
790 chunksize = ni->lower.writesize - leading;
791 if (chunksize > sizeremain)
792 chunksize = sizeremain;
793
794 if (chunksize == ni->lower.writesize) {
795 ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL,
796 NMBM_MODE_PLACE_OOB);
797 if (ret)
798 return ret;
799 } else {
800 ret = nmbm_read_phys_page(ni, off - leading,
801 ni->page_cache, NULL,
802 NMBM_MODE_PLACE_OOB);
803 if (ret)
804 return ret;
805
806 memcpy(ptr, ni->page_cache + leading, chunksize);
807 }
808
809 off += chunksize;
810 ptr += chunksize;
811 sizeremain -= chunksize;
812 }
813
814 return 0;
815}
816
817/*
818 * nmbn_write_verify_data - Write data with validation
819 * @ni: NMBM instance structure
820 * @addr: linear address where the data will be written to
821 * @data: the data to be written
822 * @size: the size of data
823 *
824 * Write data and verify.
825 * Every page will be tried for at most NMBM_TRY_COUNT times.
826 */
827static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr,
828 const void *data, uint32_t size)
829{
830 uint64_t off = addr;
831 const uint8_t *ptr = data;
832 uint32_t sizeremain = size, chunksize, leading;
833 bool success;
834 int ret;
835
836 while (sizeremain) {
837 WATCHDOG_RESET();
838
839 leading = off & ni->writesize_mask;
840 chunksize = ni->lower.writesize - leading;
841 if (chunksize > sizeremain)
842 chunksize = sizeremain;
843
844 /* Prepare page data. fill 0xff to unused region */
845 memset(ni->page_cache, 0xff, ni->rawpage_size);
846 memcpy(ni->page_cache + leading, ptr, chunksize);
847
848 success = nmbm_write_phys_page(ni, off - leading,
849 ni->page_cache, NULL,
850 NMBM_MODE_PLACE_OOB);
851 if (!success)
852 return false;
853
854 /* Verify the data just written. ECC error indicates failure */
855 ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache,
856 NULL, NMBM_MODE_PLACE_OOB);
857 if (ret)
858 return false;
859
860 if (memcmp(ni->page_cache + leading, ptr, chunksize))
861 return false;
862
863 off += chunksize;
864 ptr += chunksize;
865 sizeremain -= chunksize;
866 }
867
868 return true;
869}
870
/*
 * nmbm_write_mgmt_range - Write management data into NAND within a range
 * @ni: NMBM instance structure
 * @ba: preferred start block address for writing
 * @limit: highest block address allowed for writing
 * @data: the data to be written
 * @size: the size of data
 * @actual_start_ba: actual start block address of data
 * @actual_end_ba: block address after the end of data
 *
 * Writes @data block-by-block, skipping bad blocks and retiring any block
 * that fails erase or write-verify. Output addresses are only valid when
 * true is returned.
 *
 * @limit is not counted into the allowed write address.
 */
static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba,
				  uint32_t limit, const void *data,
				  uint32_t size, uint32_t *actual_start_ba,
				  uint32_t *actual_end_ba)
{
	const uint8_t *ptr = data;
	uint32_t sizeremain = size, chunksize;
	bool success;

	while (sizeremain && ba < limit) {
		WATCHDOG_RESET();

		/* At most one erase block worth of data per iteration */
		chunksize = sizeremain;
		if (chunksize > ni->lower.erasesize)
			chunksize = ni->lower.erasesize;

		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
			goto next_block;

		/* Insurance to detect unexpected bad block marked by user */
		if (nmbm_check_bad_phys_block(ni, ba)) {
			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
			goto next_block;
		}

		success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
		if (!success)
			goto skip_bad_block;

		success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr,
						 chunksize);
		if (!success)
			goto skip_bad_block;

		/* First successfully-written block is the actual start */
		if (sizeremain == size)
			*actual_start_ba = ba;

		ptr += chunksize;
		sizeremain -= chunksize;

		goto next_block;

	skip_bad_block:
		/* Erase/write failure: retire the block and keep going */
		nmbm_mark_phys_bad_block(ni, ba);
		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);

	next_block:
		ba++;
	}

	/* Ran out of room before all data was written */
	if (sizeremain)
		return false;

	*actual_end_ba = ba;

	return true;
}
940
941/*
942 * nmbm_generate_info_table_cache - Generate info table cache data
943 * @ni: NMBM instance structure
944 *
945 * Generate info table cache data to be written into flash.
946 */
947static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni)
948{
949 bool changed = false;
950
951 memset(ni->info_table_cache, 0xff, ni->info_table_size);
952
953 memcpy(ni->info_table_cache + ni->info_table.state_table_off,
954 ni->block_state, ni->state_table_size);
955
956 memcpy(ni->info_table_cache + ni->info_table.mapping_table_off,
957 ni->block_mapping, ni->mapping_table_size);
958
959 ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE;
960 ni->info_table.header.version = NMBM_VER;
961 ni->info_table.header.size = ni->info_table_size;
962
963 if (ni->block_state_changed || ni->block_mapping_changed) {
964 ni->info_table.write_count++;
965 changed = true;
966 }
967
968 memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table));
969
970 nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache);
971
972 return changed;
973}
974
/*
 * nmbm_write_info_table - Write info table into NAND within a range
 * @ni: NMBM instance structure
 * @ba: preferred start block address for writing
 * @limit: highest block address allowed for writing
 * @actual_start_ba: actual start block address of info table
 * @actual_end_ba: block address after the end of info table
 *
 * Thin wrapper that writes the pre-generated info table cache through
 * nmbm_write_mgmt_range().
 *
 * @limit is not counted into the allowed write address
 * (the delegate stops at ba < limit).
 */
static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba,
				  uint32_t limit, uint32_t *actual_start_ba,
				  uint32_t *actual_end_ba)
{
	return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache,
				     ni->info_table_size, actual_start_ba,
				     actual_end_ba);
}
993
/*
 * nmbm_mark_tables_clean - Mark info table `clean'
 * @ni: NMBM instance structure
 *
 * Clears both dirty counters after the tables have been committed to
 * flash, so the next nmbm_generate_info_table_cache() reports no change.
 */
static void nmbm_mark_tables_clean(struct nmbm_instance *ni)
{
	ni->block_state_changed = 0;
	ni->block_mapping_changed = 0;
}
1003
/*
 * nmbm_try_reserve_blocks - Reserve blocks with compromisation
 * @ni: NMBM instance structure
 * @ba: start physical block address
 * @nba: return physical block address after reservation
 * @count: number of good blocks to be skipped
 * @min_count: minimum number of good blocks to be skipped
 * @limit: highest block address allowed for walking (ascending only)
 *
 * Reserve specific blocks. If failed, try to reserve as many as possible,
 * down to @min_count.
 */
static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba,
				    uint32_t *nba, uint32_t count,
				    int32_t min_count, int32_t limit)
{
	int32_t want;

	/* Try the full reservation first, then progressively smaller ones */
	for (want = count; want >= min_count; want--) {
		if (nmbm_block_walk(ni, true, ba, nba, want, limit))
			return true;
	}

	return false;
}
1032
/*
 * nmbm_rebuild_info_table - Build main & backup info table from scratch
 * @ni: NMBM instance structure
 *
 * Writes the main info table, then (when spare room allows) a backup
 * copy. Returns false only when not even the main table could be
 * written; a missing backup table is tolerated.
 */
static bool nmbm_rebuild_info_table(struct nmbm_instance *ni)
{
	uint32_t table_start_ba, table_end_ba, next_start_ba;
	uint32_t main_table_end_ba;
	bool success;

	/* Set initial value */
	ni->main_table_ba = 0;
	ni->backup_table_ba = 0;
	ni->mapping_blocks_ba = ni->mapping_blocks_top_ba;

	/* Write main table */
	success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
					ni->mapping_blocks_top_ba,
					&table_start_ba, &table_end_ba);
	if (!success) {
		/* Failed to write main table, data will be lost */
		nlog_emerg(ni, "Unable to write at least one info table!\n");
		nlog_emerg(ni, "Please save your data before power off!\n");
		ni->protected = 1;
		return false;
	}

	/* Main info table is successfully written, record its offset */
	ni->main_table_ba = table_start_ba;
	main_table_end_ba = table_end_ba;

	/* Adjust mapping_blocks_ba */
	ni->mapping_blocks_ba = table_end_ba;

	nmbm_mark_tables_clean(ni);

	nlog_table_creation(ni, true, table_start_ba, table_end_ba);

	/* Reserve spare blocks for main info table. */
	success = nmbm_try_reserve_blocks(ni, table_end_ba,
					  &next_start_ba,
					  ni->info_table_spare_blocks, 0,
					  ni->mapping_blocks_top_ba -
					  size2blk(ni, ni->info_table_size));
	if (!success) {
		/* There is no spare block. Backup is skipped; still a success. */
		nlog_debug(ni, "No room for backup info table\n");
		return true;
	}

	/* Write backup info table. */
	success = nmbm_write_info_table(ni, next_start_ba,
					ni->mapping_blocks_top_ba,
					&table_start_ba, &table_end_ba);
	if (!success) {
		/* There is no enough blocks for backup table. */
		nlog_debug(ni, "No room for backup info table\n");
		return true;
	}

	/* Backup table is successfully written, record its offset */
	ni->backup_table_ba = table_start_ba;

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = table_end_ba;

	/* Erase spare blocks of main table to clean possible interference data */
	nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);

	nlog_table_creation(ni, false, table_start_ba, table_end_ba);

	return true;
}
1107
1108/*
1109 * nmbm_rescue_single_info_table - Rescue when there is only one info table
1110 * @ni: NMBM instance structure
1111 *
1112 * This function is called when there is only one info table exists.
1113 * This function may fail if we can't write new info table
1114 */
1115static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni)
1116{
1117 uint32_t table_start_ba, table_end_ba, write_ba;
1118 bool success;
1119
1120 /* Try to write new info table in front of existing table */
1121 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1122 ni->main_table_ba,
1123 &table_start_ba,
1124 &table_end_ba);
1125 if (success) {
1126 /*
1127 * New table becomes the main table, existing table becomes
1128 * the backup table.
1129 */
1130 ni->backup_table_ba = ni->main_table_ba;
1131 ni->main_table_ba = table_start_ba;
1132
1133 nmbm_mark_tables_clean(ni);
1134
1135 /* Erase spare blocks of main table to clean possible interference data */
1136 nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba);
1137
1138 nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1139
1140 return true;
1141 }
1142
1143 /* Try to reserve spare blocks for existing table */
1144 success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1145 ni->info_table_spare_blocks, 0,
1146 ni->mapping_blocks_top_ba -
1147 size2blk(ni, ni->info_table_size));
1148 if (!success) {
1149 nlog_warn(ni, "Failed to rescue single info table\n");
1150 return false;
1151 }
1152
1153 /* Try to write new info table next to the existing table */
1154 while (write_ba >= ni->mapping_blocks_ba) {
1155 WATCHDOG_RESET();
1156
1157 success = nmbm_write_info_table(ni, write_ba,
1158 ni->mapping_blocks_top_ba,
1159 &table_start_ba,
1160 &table_end_ba);
1161 if (success)
1162 break;
1163
1164 write_ba--;
1165 }
1166
1167 if (success) {
1168 /* Erase spare blocks of main table to clean possible interference data */
1169 nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba);
1170
1171 /* New table becomes the backup table */
1172 ni->backup_table_ba = table_start_ba;
1173 ni->mapping_blocks_ba = table_end_ba;
1174
1175 nmbm_mark_tables_clean(ni);
1176
1177 nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1178
1179 return true;
1180 }
1181
1182 nlog_warn(ni, "Failed to rescue single info table\n");
1183 return false;
1184}
1185
1186/*
1187 * nmbm_update_single_info_table - Update specific one info table
1188 * @ni: NMBM instance structure
1189 */
1190static bool nmbm_update_single_info_table(struct nmbm_instance *ni,
1191 bool update_main_table)
1192{
1193 uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba;
1194 bool success;
1195
1196 /* Determine the write range */
1197 if (update_main_table) {
1198 write_start_ba = ni->main_table_ba;
1199 write_limit = ni->backup_table_ba;
1200 } else {
1201 write_start_ba = ni->backup_table_ba;
1202 write_limit = ni->mapping_blocks_top_ba;
1203 }
1204
1205 nmbm_mark_block_color_mgmt(ni, write_start_ba, write_limit - 1);
1206
1207 success = nmbm_write_info_table(ni, write_start_ba, write_limit,
1208 &table_start_ba, &table_end_ba);
1209 if (success) {
1210 if (update_main_table) {
1211 ni->main_table_ba = table_start_ba;
1212 } else {
1213 ni->backup_table_ba = table_start_ba;
1214 ni->mapping_blocks_ba = table_end_ba;
1215 }
1216
1217 nmbm_mark_tables_clean(ni);
1218
1219 nlog_table_update(ni, update_main_table, table_start_ba,
1220 table_end_ba);
1221
1222 return true;
1223 }
1224
1225 if (update_main_table) {
1226 /*
1227 * If failed to update main table, make backup table the new
1228 * main table, and call nmbm_rescue_single_info_table()
1229 */
1230 nlog_warn(ni, "Unable to update %s info table\n",
1231 update_main_table ? "Main" : "Backup");
1232
1233 ni->main_table_ba = ni->backup_table_ba;
1234 ni->backup_table_ba = 0;
1235 return nmbm_rescue_single_info_table(ni);
1236 }
1237
1238 /* Only one table left */
1239 ni->mapping_blocks_ba = ni->backup_table_ba;
1240 ni->backup_table_ba = 0;
1241
1242 return false;
1243}
1244
1245/*
1246 * nmbm_rescue_main_info_table - Rescue when failed to write main info table
1247 * @ni: NMBM instance structure
1248 *
1249 * This function is called when main info table failed to be written, and
1250 * backup info table exists.
1251 */
1252static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni)
1253{
1254 uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba;
1255 uint32_t main_table_end_ba, write_ba;
1256 uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size);
1257 bool success;
1258
1259 /* Try to reserve spare blocks for existing backup info table */
1260 success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1261 ni->info_table_spare_blocks, 0,
1262 ni->mapping_blocks_top_ba -
1263 info_table_erasesize);
1264 if (!success) {
1265 /* There is no spare block. Backup info table becomes the main table. */
1266 nlog_err(ni, "No room for temporary info table\n");
1267 ni->main_table_ba = ni->backup_table_ba;
1268 ni->backup_table_ba = 0;
1269 return true;
1270 }
1271
1272 /* Try to write temporary info table into spare unmapped blocks */
1273 while (write_ba >= ni->mapping_blocks_ba) {
1274 WATCHDOG_RESET();
1275
1276 success = nmbm_write_info_table(ni, write_ba,
1277 ni->mapping_blocks_top_ba,
1278 &tmp_table_start_ba,
1279 &tmp_table_end_ba);
1280 if (success)
1281 break;
1282
1283 write_ba--;
1284 }
1285
1286 if (!success) {
1287 /* Backup info table becomes the main table */
1288 nlog_err(ni, "Failed to update main info table\n");
1289 ni->main_table_ba = ni->backup_table_ba;
1290 ni->backup_table_ba = 0;
1291 return true;
1292 }
1293
1294 /* Adjust mapping_blocks_off */
1295 ni->mapping_blocks_ba = tmp_table_end_ba;
1296
1297 nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1298 tmp_table_end_ba - 1);
1299
1300 /*
1301 * Now write main info table at the beginning of management area.
1302 * This operation will generally destroy the original backup info
1303 * table.
1304 */
1305 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1306 tmp_table_start_ba,
1307 &main_table_start_ba,
1308 &main_table_end_ba);
1309 if (!success) {
1310 /* Temporary info table becomes the main table */
1311 ni->main_table_ba = tmp_table_start_ba;
1312 ni->backup_table_ba = 0;
1313
1314 nmbm_mark_tables_clean(ni);
1315
1316 nlog_err(ni, "Failed to update main info table\n");
1317 nmbm_mark_block_color_info_table(ni, tmp_table_start_ba,
1318 tmp_table_end_ba - 1);
1319
1320 return true;
1321 }
1322
1323 /* Main info table has been successfully written, record its offset */
1324 ni->main_table_ba = main_table_start_ba;
1325
1326 nmbm_mark_tables_clean(ni);
1327
1328 nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba);
1329
1330 /*
1331 * Temporary info table becomes the new backup info table if it's
1332 * not overwritten.
1333 */
1334 if (main_table_end_ba <= tmp_table_start_ba) {
1335 ni->backup_table_ba = tmp_table_start_ba;
1336
1337 nlog_table_creation(ni, false, tmp_table_start_ba,
1338 tmp_table_end_ba);
1339
1340 return true;
1341 }
1342
1343 /* Adjust mapping_blocks_off */
1344 ni->mapping_blocks_ba = main_table_end_ba;
1345
1346 /* Try to reserve spare blocks for new main info table */
1347 success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba,
1348 ni->info_table_spare_blocks, 0,
1349 ni->mapping_blocks_top_ba -
1350 info_table_erasesize);
1351 if (!success) {
1352 /* There is no spare block. Only main table exists. */
1353 nlog_err(ni, "No room for backup info table\n");
1354 ni->backup_table_ba = 0;
1355 return true;
1356 }
1357
1358 /* Write new backup info table. */
1359 while (write_ba >= main_table_end_ba) {
1360 WATCHDOG_RESET();
1361
1362 success = nmbm_write_info_table(ni, write_ba,
1363 ni->mapping_blocks_top_ba,
1364 &tmp_table_start_ba,
1365 &tmp_table_end_ba);
1366 if (success)
1367 break;
1368
1369 write_ba--;
1370 }
1371
1372 if (!success) {
1373 nlog_err(ni, "No room for backup info table\n");
1374 ni->backup_table_ba = 0;
1375 return true;
1376 }
1377
1378 /* Backup info table has been successfully written, record its offset */
1379 ni->backup_table_ba = tmp_table_start_ba;
1380
1381 /* Adjust mapping_blocks_off */
1382 ni->mapping_blocks_ba = tmp_table_end_ba;
1383
1384 /* Erase spare blocks of main table to clean possible interference data */
1385 nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
1386
1387 nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba);
1388
1389 return true;
1390}
1391
1392/*
1393 * nmbm_update_info_table_once - Update info table once
1394 * @ni: NMBM instance structure
1395 * @force: force update
1396 *
1397 * Update both main and backup info table. Return true if at least one info
1398 * table has been successfully written.
1399 * This function only try to update info table once regard less of the result.
1400 */
1401static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force)
1402{
1403 uint32_t table_start_ba, table_end_ba;
1404 uint32_t main_table_limit;
1405 bool success;
1406
1407 /* Do nothing if there is no change */
1408 if (!nmbm_generate_info_table_cache(ni) && !force)
1409 return true;
1410
1411 /* Check whether both two tables exist */
1412 if (!ni->backup_table_ba) {
1413 main_table_limit = ni->mapping_blocks_top_ba;
1414 goto write_main_table;
1415 }
1416
1417 nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1418 ni->mapping_blocks_ba - 1);
1419
1420 /*
1421 * Write backup info table in its current range.
1422 * Note that limit is set to mapping_blocks_top_off to provide as many
1423 * spare blocks as possible for the backup table. If at last
1424 * unmapped blocks are used by backup table, mapping_blocks_off will
1425 * be adjusted.
1426 */
1427 success = nmbm_write_info_table(ni, ni->backup_table_ba,
1428 ni->mapping_blocks_top_ba,
1429 &table_start_ba, &table_end_ba);
1430 if (!success) {
1431 /*
1432 * There is nothing to do if failed to write backup table.
1433 * Write the main table now.
1434 */
1435 nlog_err(ni, "No room for backup table\n");
1436 ni->mapping_blocks_ba = ni->backup_table_ba;
1437 ni->backup_table_ba = 0;
1438 main_table_limit = ni->mapping_blocks_top_ba;
1439 goto write_main_table;
1440 }
1441
1442 /* Backup table is successfully written, record its offset */
1443 ni->backup_table_ba = table_start_ba;
1444
1445 /* Adjust mapping_blocks_off */
1446 ni->mapping_blocks_ba = table_end_ba;
1447
1448 nmbm_mark_tables_clean(ni);
1449
1450 /* The normal limit of main table */
1451 main_table_limit = ni->backup_table_ba;
1452
1453 nlog_table_update(ni, false, table_start_ba, table_end_ba);
1454
1455write_main_table:
1456 if (!ni->main_table_ba)
1457 goto rebuild_tables;
1458
1459 if (!ni->backup_table_ba)
1460 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1461 ni->mapping_blocks_ba - 1);
1462 else
1463 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1464 ni->backup_table_ba - 1);
1465
1466 /* Write main info table in its current range */
1467 success = nmbm_write_info_table(ni, ni->main_table_ba,
1468 main_table_limit, &table_start_ba,
1469 &table_end_ba);
1470 if (!success) {
1471 /* If failed to write main table, go rescue procedure */
1472 if (!ni->backup_table_ba)
1473 goto rebuild_tables;
1474
1475 return nmbm_rescue_main_info_table(ni);
1476 }
1477
1478 /* Main info table is successfully written, record its offset */
1479 ni->main_table_ba = table_start_ba;
1480
1481 /* Adjust mapping_blocks_off */
1482 if (!ni->backup_table_ba)
1483 ni->mapping_blocks_ba = table_end_ba;
1484
1485 nmbm_mark_tables_clean(ni);
1486
1487 nlog_table_update(ni, true, table_start_ba, table_end_ba);
1488
1489 return true;
1490
1491rebuild_tables:
1492 return nmbm_rebuild_info_table(ni);
1493}
1494
1495/*
1496 * nmbm_update_info_table - Update info table
1497 * @ni: NMBM instance structure
1498 *
1499 * Update both main and backup info table. Return true if at least one table
1500 * has been successfully written.
1501 * This function will try to update info table repeatedly until no new bad
1502 * block found during updating.
1503 */
1504static bool nmbm_update_info_table(struct nmbm_instance *ni)
1505{
1506 bool success;
1507
1508 if (ni->protected)
1509 return true;
1510
1511 while (ni->block_state_changed || ni->block_mapping_changed) {
1512 success = nmbm_update_info_table_once(ni, false);
1513 if (!success) {
1514 nlog_err(ni, "Failed to update info table\n");
1515 return false;
1516 }
1517 }
1518
1519 return true;
1520}
1521
1522/*
1523 * nmbm_map_block - Map a bad block to a unused spare block
1524 * @ni: NMBM instance structure
1525 * @lb: logic block addr to map
1526 */
1527static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb)
1528{
1529 uint32_t pb;
1530 bool success;
1531
1532 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
1533 nlog_warn(ni, "No spare unmapped blocks.\n");
1534 return false;
1535 }
1536
1537 success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0,
1538 ni->mapping_blocks_ba);
1539 if (!success) {
1540 nlog_warn(ni, "No spare unmapped blocks.\n");
1541 nmbm_update_info_table(ni);
1542 ni->mapping_blocks_top_ba = ni->mapping_blocks_ba;
1543 return false;
1544 }
1545
1546 ni->block_mapping[lb] = pb;
1547 ni->mapping_blocks_top_ba--;
1548 ni->block_mapping_changed++;
1549
1550 nlog_info(ni, "Logic block %u mapped to physical blcok %u\n", lb, pb);
1551 nmbm_mark_block_color_mapped(ni, pb);
1552
1553 return true;
1554}
1555
1556/*
1557 * nmbm_create_info_table - Create info table(s)
1558 * @ni: NMBM instance structure
1559 *
1560 * This function assumes that the chip has no existing info table(s)
1561 */
1562static bool nmbm_create_info_table(struct nmbm_instance *ni)
1563{
1564 uint32_t lb;
1565 bool success;
1566
1567 /* Set initial mapping_blocks_top_off */
1568 success = nmbm_block_walk(ni, false, ni->signature_ba,
1569 &ni->mapping_blocks_top_ba, 1,
1570 ni->mgmt_start_ba);
1571 if (!success) {
1572 nlog_err(ni, "No room for spare blocks\n");
1573 return false;
1574 }
1575
1576 /* Generate info table cache */
1577 nmbm_generate_info_table_cache(ni);
1578
1579 /* Write info table */
1580 success = nmbm_rebuild_info_table(ni);
1581 if (!success) {
1582 nlog_err(ni, "Failed to build info tables\n");
1583 return false;
1584 }
1585
1586 /* Remap bad block(s) at end of data area */
1587 for (lb = ni->data_block_count; lb < ni->mgmt_start_ba; lb++) {
1588 success = nmbm_map_block(ni, lb);
1589 if (!success)
1590 break;
1591
1592 ni->data_block_count++;
1593 }
1594
1595 /* If state table and/or mapping table changed, update info table. */
1596 success = nmbm_update_info_table(ni);
1597 if (!success)
1598 return false;
1599
1600 return true;
1601}
1602
1603/*
1604 * nmbm_create_new - Create NMBM on a new chip
1605 * @ni: NMBM instance structure
1606 */
1607static bool nmbm_create_new(struct nmbm_instance *ni)
1608{
1609 bool success;
1610
1611 /* Determine the boundary of management blocks */
1612 ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->lower.max_ratio) / NMBM_MGMT_DIV;
1613
1614 if (ni->lower.max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->lower.max_reserved_blocks)
1615 ni->mgmt_start_ba = ni->block_count - ni->lower.max_reserved_blocks;
1616
1617 nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n",
1618 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
1619 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba, ni->block_count - 1);
1620
1621 /* Fill block state table & mapping table */
1622 nmbm_scan_badblocks(ni);
1623 nmbm_build_mapping_table(ni);
1624
1625 /* Write signature */
1626 ni->signature.header.magic = NMBM_MAGIC_SIGNATURE;
1627 ni->signature.header.version = NMBM_VER;
1628 ni->signature.header.size = sizeof(ni->signature);
1629 ni->signature.nand_size = ni->lower.size;
1630 ni->signature.block_size = ni->lower.erasesize;
1631 ni->signature.page_size = ni->lower.writesize;
1632 ni->signature.spare_size = ni->lower.oobsize;
1633 ni->signature.mgmt_start_pb = ni->mgmt_start_ba;
1634 ni->signature.max_try_count = NMBM_TRY_COUNT;
1635 nmbm_update_checksum(&ni->signature.header);
1636
1637 success = nmbm_write_signature(ni, ni->mgmt_start_ba,
1638 &ni->signature, &ni->signature_ba);
1639 if (!success) {
1640 nlog_err(ni, "Failed to write signature to a proper offset\n");
1641 return false;
1642 }
1643
1644 nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n",
1645 ni->signature_ba, ba2addr(ni, ni->signature_ba));
1646 nmbm_mark_block_color_signature(ni, ni->signature_ba);
1647
1648 /* Write info table(s) */
1649 success = nmbm_create_info_table(ni);
1650 if (success) {
1651 nlog_info(ni, "NMBM has been successfully created\n");
1652 return true;
1653 }
1654
1655 return false;
1656}
1657
1658/*
1659 * nmbm_check_info_table_header - Check if a info table header is valid
1660 * @ni: NMBM instance structure
1661 * @data: pointer to the info table header
1662 */
1663static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data)
1664{
1665 struct nmbm_info_table_header *ifthdr = data;
1666
1667 if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE)
1668 return false;
1669
1670 if (ifthdr->header.size != ni->info_table_size)
1671 return false;
1672
1673 if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size)
1674 return false;
1675
1676 if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size)
1677 return false;
1678
1679 return true;
1680}
1681
1682/*
1683 * nmbm_check_info_table - Check if a whole info table is valid
1684 * @ni: NMBM instance structure
1685 * @start_ba: start block address of this table
1686 * @end_ba: end block address of this table
1687 * @data: pointer to the info table header
1688 * @mapping_blocks_top_ba: return the block address of top remapped block
1689 */
1690static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba,
1691 uint32_t end_ba, void *data,
1692 uint32_t *mapping_blocks_top_ba)
1693{
1694 struct nmbm_info_table_header *ifthdr = data;
1695 int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off);
1696 nmbm_bitmap_t *block_state = (nmbm_bitmap_t *)((uintptr_t)data + ifthdr->state_table_off);
1697 uint32_t minimum_mapping_pb = ni->signature_ba;
1698 uint32_t ba;
1699
1700 for (ba = 0; ba < ni->data_block_count; ba++) {
1701 if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) ||
1702 block_mapping[ba] == ni->signature_ba)
1703 return false;
1704
1705 if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb)
1706 minimum_mapping_pb = block_mapping[ba];
1707 }
1708
1709 for (ba = start_ba; ba < end_ba; ba++) {
1710 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1711 continue;
1712
1713 if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD)
1714 return false;
1715 }
1716
1717 *mapping_blocks_top_ba = minimum_mapping_pb - 1;
1718
1719 return true;
1720}
1721
1722/*
1723 * nmbm_try_load_info_table - Try to load info table from a address
1724 * @ni: NMBM instance structure
1725 * @ba: start block address of the info table
1726 * @eba: return the block address after end of the table
1727 * @write_count: return the write count of this table
1728 * @mapping_blocks_top_ba: return the block address of top remapped block
1729 * @table_loaded: used to record whether ni->info_table has valid data
1730 */
1731static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba,
1732 uint32_t *eba, uint32_t *write_count,
1733 uint32_t *mapping_blocks_top_ba,
1734 bool table_loaded)
1735{
1736 struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache;
1737 uint8_t *off = ni->info_table_cache;
1738 uint32_t limit = ba + size2blk(ni, ni->info_table_size);
1739 uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size;
1740 bool success, checkhdr = true;
1741 int ret;
1742
1743 while (sizeremain && ba < limit) {
1744 WATCHDOG_RESET();
1745
1746 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1747 goto next_block;
1748
1749 if (nmbm_check_bad_phys_block(ni, ba)) {
1750 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1751 goto next_block;
1752 }
1753
1754 chunksize = sizeremain;
1755 if (chunksize > ni->lower.erasesize)
1756 chunksize = ni->lower.erasesize;
1757
1758 /* Assume block with ECC error has no info table data */
1759 ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize);
1760 if (ret < 0)
1761 goto skip_bad_block;
1762 else if (ret > 0)
1763 return false;
1764
1765 if (checkhdr) {
1766 success = nmbm_check_info_table_header(ni, off);
1767 if (!success)
1768 return false;
1769
1770 start_ba = ba;
1771 checkhdr = false;
1772 }
1773
1774 off += chunksize;
1775 sizeremain -= chunksize;
1776
1777 goto next_block;
1778
1779 skip_bad_block:
1780 /* Only mark bad in memory */
1781 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1782
1783 next_block:
1784 ba++;
1785 }
1786
1787 if (sizeremain)
1788 return false;
1789
1790 success = nmbm_check_header(ni->info_table_cache, ni->info_table_size);
1791 if (!success)
1792 return false;
1793
1794 *eba = ba;
1795 *write_count = ifthdr->write_count;
1796
1797 success = nmbm_check_info_table(ni, start_ba, ba, ni->info_table_cache,
1798 mapping_blocks_top_ba);
1799 if (!success)
1800 return false;
1801
1802 if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) {
1803 memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table));
1804 memcpy(ni->block_state,
1805 (uint8_t *)ifthdr + ifthdr->state_table_off,
1806 ni->state_table_size);
1807 memcpy(ni->block_mapping,
1808 (uint8_t *)ifthdr + ifthdr->mapping_table_off,
1809 ni->mapping_table_size);
1810 ni->info_table.write_count = ifthdr->write_count;
1811 }
1812
1813 return true;
1814}
1815
1816/*
1817 * nmbm_search_info_table - Search info table from specific address
1818 * @ni: NMBM instance structure
1819 * @ba: start block address to search
1820 * @limit: highest block address allowed for searching
1821 * @table_start_ba: return the start block address of this table
1822 * @table_end_ba: return the block address after end of this table
1823 * @write_count: return the write count of this table
1824 * @mapping_blocks_top_ba: return the block address of top remapped block
1825 * @table_loaded: used to record whether ni->info_table has valid data
1826 */
1827static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba,
1828 uint32_t limit, uint32_t *table_start_ba,
1829 uint32_t *table_end_ba,
1830 uint32_t *write_count,
1831 uint32_t *mapping_blocks_top_ba,
1832 bool table_loaded)
1833{
1834 bool success;
1835
1836 while (ba < limit - size2blk(ni, ni->info_table_size)) {
1837 WATCHDOG_RESET();
1838
1839 success = nmbm_try_load_info_table(ni, ba, table_end_ba,
1840 write_count,
1841 mapping_blocks_top_ba,
1842 table_loaded);
1843 if (success) {
1844 *table_start_ba = ba;
1845 return true;
1846 }
1847
1848 ba++;
1849 }
1850
1851 return false;
1852}
1853
1854/*
1855 * nmbm_load_info_table - Load info table(s) from a chip
1856 * @ni: NMBM instance structure
1857 * @ba: start block address to search info table
1858 * @limit: highest block address allowed for searching
1859 */
1860static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba,
1861 uint32_t limit)
1862{
1863 uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba;
1864 uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba;
1865 uint32_t main_table_write_count, backup_table_write_count;
1866 uint32_t i;
1867 bool success;
1868
1869 /* Set initial value */
1870 ni->main_table_ba = 0;
1871 ni->backup_table_ba = 0;
1872 ni->info_table.write_count = 0;
1873 ni->mapping_blocks_top_ba = ni->signature_ba - 1;
1874 ni->data_block_count = ni->signature.mgmt_start_pb;
1875
1876 /* Find first info table */
1877 success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba,
1878 &main_table_end_ba, &main_table_write_count,
1879 &main_mapping_blocks_top_ba, false);
1880 if (!success) {
1881 nlog_warn(ni, "No valid info table found\n");
1882 return false;
1883 }
1884
1885 table_end_ba = main_table_end_ba;
1886
1887 nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba,
1888 main_table_end_ba);
1889
1890 /* Find second info table */
1891 success = nmbm_search_info_table(ni, main_table_end_ba, limit,
1892 &ni->backup_table_ba, &backup_table_end_ba,
1893 &backup_table_write_count, &backup_mapping_blocks_top_ba, true);
1894 if (!success) {
1895 nlog_warn(ni, "Second info table not found\n");
1896 } else {
1897 table_end_ba = backup_table_end_ba;
1898
1899 nlog_table_found(ni, false, backup_table_write_count,
1900 ni->backup_table_ba, backup_table_end_ba);
1901 }
1902
1903 /* Pick mapping_blocks_top_ba */
1904 if (!ni->backup_table_ba) {
1905 ni->mapping_blocks_top_ba= main_mapping_blocks_top_ba;
1906 } else {
1907 if (main_table_write_count >= backup_table_write_count)
1908 ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
1909 else
1910 ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba;
1911 }
1912
1913 /* Set final mapping_blocks_ba */
1914 ni->mapping_blocks_ba = table_end_ba;
1915
1916 /* Set final data_block_count */
1917 for (i = ni->signature.mgmt_start_pb; i > 0; i--) {
1918 if (ni->block_mapping[i - 1] >= 0) {
1919 ni->data_block_count = i;
1920 break;
1921 }
1922 }
1923
1924 /* Debug purpose: mark mapped blocks and bad blocks */
1925 for (i = 0; i < ni->data_block_count; i++) {
1926 if (ni->block_mapping[i] > ni->mapping_blocks_top_ba)
1927 nmbm_mark_block_color_mapped(ni, ni->block_mapping[i]);
1928 }
1929
1930 for (i = 0; i < ni->block_count; i++) {
1931 if (nmbm_get_block_state(ni, i) == BLOCK_ST_BAD)
1932 nmbm_mark_block_color_bad(ni, i);
1933 }
1934
1935 /* Regenerate the info table cache from the final selected info table */
1936 nmbm_generate_info_table_cache(ni);
1937
1938 /*
1939 * If only one table exists, try to write another table.
1940 * If two tables have different write count, try to update info table
1941 */
1942 if (!ni->backup_table_ba) {
1943 success = nmbm_rescue_single_info_table(ni);
1944 } else if (main_table_write_count != backup_table_write_count) {
1945 /* Mark state & mapping tables changed */
1946 ni->block_state_changed = 1;
1947 ni->block_mapping_changed = 1;
1948
1949 success = nmbm_update_single_info_table(ni,
1950 main_table_write_count < backup_table_write_count);
1951 } else {
1952 success = true;
1953 }
1954
1955 /*
1956 * If there is no spare unmapped blocks, or still only one table
1957 * exists, set the chip to read-only
1958 */
1959 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
1960 nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n");
1961 ni->protected = 1;
1962 } else if (!success) {
1963 nlog_warn(ni, "Only one info table found. Device is now read-only\n");
1964 ni->protected = 1;
1965 }
1966
1967 return true;
1968}
1969
1970/*
1971 * nmbm_load_existing - Load NMBM from a new chip
1972 * @ni: NMBM instance structure
1973 */
1974static bool nmbm_load_existing(struct nmbm_instance *ni)
1975{
1976 bool success;
1977
1978 /* Calculate the boundary of management blocks */
1979 ni->mgmt_start_ba = ni->signature.mgmt_start_pb;
1980
1981 nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n",
1982 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
1983 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1984 ni->signature_ba - 1);
1985
1986 /* Look for info table(s) */
1987 success = nmbm_load_info_table(ni, ni->mgmt_start_ba,
1988 ni->signature_ba);
1989 if (success) {
1990 nlog_info(ni, "NMBM has been successfully attached\n");
1991 return true;
1992 }
1993
1994 if (!(ni->lower.flags & NMBM_F_CREATE))
1995 return false;
1996
1997 /* Fill block state table & mapping table */
1998 nmbm_scan_badblocks(ni);
1999 nmbm_build_mapping_table(ni);
2000
2001 /* Write info table(s) */
2002 success = nmbm_create_info_table(ni);
2003 if (success) {
2004 nlog_info(ni, "NMBM has been successfully created\n");
2005 return true;
2006 }
2007
2008 return false;
2009}
2010
2011/*
2012 * nmbm_find_signature - Find signature in the lower NAND chip
2013 * @ni: NMBM instance structure
2014 * @signature_ba: used for storing block address of the signature
2015 * @signature_ba: return the actual block address of signature block
2016 *
2017 * Find a valid signature from a specific range in the lower NAND chip,
2018 * from bottom (highest address) to top (lowest address)
2019 *
2020 * Return true if found.
2021 */
2022static bool nmbm_find_signature(struct nmbm_instance *ni,
2023 struct nmbm_signature *signature,
2024 uint32_t *signature_ba)
2025{
2026 struct nmbm_signature sig;
2027 uint64_t off, addr;
2028 uint32_t block_count, ba, limit;
2029 bool success;
2030 int ret;
2031
2032 /* Calculate top and bottom block address */
2033 block_count = ni->lower.size >> ni->erasesize_shift;
2034 ba = block_count;
2035 limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->lower.max_ratio);
2036 if (ni->lower.max_reserved_blocks && block_count - limit > ni->lower.max_reserved_blocks)
2037 limit = block_count - ni->lower.max_reserved_blocks;
2038
2039 while (ba >= limit) {
2040 WATCHDOG_RESET();
2041
2042 ba--;
2043 addr = ba2addr(ni, ba);
2044
2045 if (nmbm_check_bad_phys_block(ni, ba))
2046 continue;
2047
2048 /* Check every page.
2049 * As long as at leaset one page contains valid signature,
2050 * the block is treated as a valid signature block.
2051 */
2052 for (off = 0; off < ni->lower.erasesize;
2053 off += ni->lower.writesize) {
2054 WATCHDOG_RESET();
2055
2056 ret = nmbn_read_data(ni, addr + off, &sig,
2057 sizeof(sig));
2058 if (ret)
2059 continue;
2060
2061 /* Check for header size and checksum */
2062 success = nmbm_check_header(&sig, sizeof(sig));
2063 if (!success)
2064 continue;
2065
2066 /* Check for header magic */
2067 if (sig.header.magic == NMBM_MAGIC_SIGNATURE) {
2068 /* Found it */
2069 memcpy(signature, &sig, sizeof(sig));
2070 *signature_ba = ba;
2071 return true;
2072 }
2073 }
2074 };
2075
2076 return false;
2077}
2078
2079/*
2080 * is_power_of_2_u64 - Check whether a 64-bit integer is power of 2
2081 * @n: number to check
2082 */
2083static bool is_power_of_2_u64(uint64_t n)
2084{
2085 return (n != 0 && ((n & (n - 1)) == 0));
2086}
2087
2088/*
2089 * nmbm_check_lower_members - Validate the members of lower NAND device
2090 * @nld: Lower NAND chip structure
2091 */
2092static bool nmbm_check_lower_members(struct nmbm_lower_device *nld)
2093{
2094
2095 if (!nld->size || !is_power_of_2_u64(nld->size)) {
2096 nmbm_log_lower(nld, NMBM_LOG_ERR,
2097 "Chip size %llu is not valid\n", nld->size);
2098 return false;
2099 }
2100
2101 if (!nld->erasesize || !is_power_of_2(nld->erasesize)) {
2102 nmbm_log_lower(nld, NMBM_LOG_ERR,
2103 "Block size %u is not valid\n", nld->erasesize);
2104 return false;
2105 }
2106
2107 if (!nld->writesize || !is_power_of_2(nld->writesize)) {
2108 nmbm_log_lower(nld, NMBM_LOG_ERR,
2109 "Page size %u is not valid\n", nld->writesize);
2110 return false;
2111 }
2112
2113 if (!nld->oobsize || !is_power_of_2(nld->oobsize)) {
2114 nmbm_log_lower(nld, NMBM_LOG_ERR,
2115 "Page spare size %u is not valid\n", nld->oobsize);
2116 return false;
2117 }
2118
2119 if (!nld->read_page || !nld->write_page || !nld->erase_block) {
2120 nmbm_log_lower(nld, NMBM_LOG_ERR,
2121 "read_page(), write_page() and erase_block() are required\n");
2122 return false;
2123 }
2124
2125 /* Data sanity check */
2126 if (!nld->max_ratio)
2127 nld->max_ratio = 1;
2128
2129 if (nld->max_ratio >= NMBM_MGMT_DIV - 1) {
2130 nmbm_log_lower(nld, NMBM_LOG_ERR,
2131 "max ratio %u is invalid\n", nld->max_ratio);
2132 return false;
2133 }
2134
2135 if (nld->max_reserved_blocks && nld->max_reserved_blocks < NMBM_MGMT_BLOCKS_MIN) {
2136 nmbm_log_lower(nld, NMBM_LOG_ERR,
2137 "max reserved blocks %u is too small\n", nld->max_reserved_blocks);
2138 return false;
2139 }
2140
2141 return true;
2142}
2143
2144/*
2145 * nmbm_calc_structure_size - Calculate the instance structure size
2146 * @nld: NMBM lower device structure
2147 */
2148size_t nmbm_calc_structure_size(struct nmbm_lower_device *nld)
2149{
2150 uint32_t state_table_size, mapping_table_size, info_table_size;
2151 uint32_t block_count;
2152
2153 block_count = nmbm_lldiv(nld->size, nld->erasesize);
2154
2155 /* Calculate info table size */
2156 state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2157 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2158 mapping_table_size = block_count * sizeof(int32_t);
2159
2160 info_table_size = NMBM_ALIGN(sizeof(struct nmbm_info_table_header),
2161 nld->writesize);
2162 info_table_size += NMBM_ALIGN(state_table_size, nld->writesize);
2163 info_table_size += NMBM_ALIGN(mapping_table_size, nld->writesize);
2164
2165 return info_table_size + state_table_size + mapping_table_size +
2166 nld->writesize + nld->oobsize + sizeof(struct nmbm_instance);
2167}
2168
2169/*
2170 * nmbm_init_structure - Initialize members of instance structure
2171 * @ni: NMBM instance structure
2172 */
2173static void nmbm_init_structure(struct nmbm_instance *ni)
2174{
2175 uint32_t pages_per_block, blocks_per_chip;
2176 uintptr_t ptr;
2177
2178 pages_per_block = ni->lower.erasesize / ni->lower.writesize;
2179 blocks_per_chip = nmbm_lldiv(ni->lower.size, ni->lower.erasesize);
2180
2181 ni->rawpage_size = ni->lower.writesize + ni->lower.oobsize;
2182 ni->rawblock_size = pages_per_block * ni->rawpage_size;
2183 ni->rawchip_size = blocks_per_chip * ni->rawblock_size;
2184
2185 ni->writesize_mask = ni->lower.writesize - 1;
2186 ni->erasesize_mask = ni->lower.erasesize - 1;
2187
2188 ni->writesize_shift = ffs(ni->lower.writesize) - 1;
2189 ni->erasesize_shift = ffs(ni->lower.erasesize) - 1;
2190
2191 /* Calculate number of block this chip */
2192 ni->block_count = ni->lower.size >> ni->erasesize_shift;
2193
2194 /* Calculate info table size */
2195 ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2196 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2197 ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping);
2198
2199 ni->info_table_size = NMBM_ALIGN(sizeof(ni->info_table),
2200 ni->lower.writesize);
2201 ni->info_table.state_table_off = ni->info_table_size;
2202
2203 ni->info_table_size += NMBM_ALIGN(ni->state_table_size,
2204 ni->lower.writesize);
2205 ni->info_table.mapping_table_off = ni->info_table_size;
2206
2207 ni->info_table_size += NMBM_ALIGN(ni->mapping_table_size,
2208 ni->lower.writesize);
2209
2210 ni->info_table_spare_blocks = nmbm_get_spare_block_count(
2211 size2blk(ni, ni->info_table_size));
2212
2213 /* Assign memory to members */
2214 ptr = (uintptr_t)ni + sizeof(*ni);
2215
2216 ni->info_table_cache = (void *)ptr;
2217 ptr += ni->info_table_size;
2218
2219 ni->block_state = (void *)ptr;
2220 ptr += ni->state_table_size;
2221
2222 ni->block_mapping = (void *)ptr;
2223 ptr += ni->mapping_table_size;
2224
2225 ni->page_cache = (uint8_t *)ptr;
2226
2227 /* Initialize block state table */
2228 ni->block_state_changed = 0;
2229 memset(ni->block_state, 0xff, ni->state_table_size);
2230
2231 /* Initialize block mapping table */
2232 ni->block_mapping_changed = 0;
2233}
2234
2235/*
2236 * nmbm_attach - Attach to a lower device
2237 * @nld: NMBM lower device structure
2238 * @ni: NMBM instance structure
2239 */
2240int nmbm_attach(struct nmbm_lower_device *nld, struct nmbm_instance *ni)
2241{
2242 bool success;
2243
2244 if (!nld || !ni)
2245 return -EINVAL;
2246
2247 /* Set default log level */
2248 ni->log_display_level = NMBM_DEFAULT_LOG_LEVEL;
2249
2250 /* Check lower members */
2251 success = nmbm_check_lower_members(nld);
2252 if (!success)
2253 return -EINVAL;
2254
2255 /* Initialize NMBM instance */
2256 memcpy(&ni->lower, nld, sizeof(struct nmbm_lower_device));
2257 nmbm_init_structure(ni);
2258
2259 success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba);
2260 if (!success) {
2261 if (!(nld->flags & NMBM_F_CREATE)) {
2262 nlog_err(ni, "Signature not found\n");
2263 return -ENODEV;
2264 }
2265
2266 success = nmbm_create_new(ni);
2267 if (!success)
2268 return -ENODEV;
2269
2270 return 0;
2271 }
2272
2273 nlog_info(ni, "Signature found at block %u [0x%08llx]\n",
2274 ni->signature_ba, ba2addr(ni, ni->signature_ba));
2275 nmbm_mark_block_color_signature(ni, ni->signature_ba);
2276
2277 if (ni->signature.header.version != NMBM_VER) {
2278 nlog_err(ni, "NMBM version %u.%u is not supported\n",
2279 NMBM_VERSION_MAJOR_GET(ni->signature.header.version),
2280 NMBM_VERSION_MINOR_GET(ni->signature.header.version));
2281 return -EINVAL;
2282 }
2283
2284 if (ni->signature.nand_size != nld->size ||
2285 ni->signature.block_size != nld->erasesize ||
2286 ni->signature.page_size != nld->writesize ||
2287 ni->signature.spare_size != nld->oobsize) {
2288 nlog_err(ni, "NMBM configuration mismatch\n");
2289 return -EINVAL;
2290 }
2291
2292 success = nmbm_load_existing(ni);
2293 if (!success)
2294 return -ENODEV;
2295
2296 return 0;
2297}
2298
2299/*
2300 * nmbm_detach - Detach from a lower device, and save all tables
2301 * @ni: NMBM instance structure
2302 */
2303int nmbm_detach(struct nmbm_instance *ni)
2304{
2305 if (!ni)
2306 return -EINVAL;
2307
2308 nmbm_update_info_table(ni);
2309
2310 nmbm_mark_block_color_normal(ni, 0, ni->block_count - 1);
2311
2312 return 0;
2313}
2314
2315/*
2316 * nmbm_erase_logic_block - Erase a logic block
2317 * @ni: NMBM instance structure
2318 * @nmbm_erase_logic_block: logic block address
2319 *
2320 * Logic block will be mapped to physical block before erasing.
2321 * Bad block found during erasinh will be remapped to a good block if there is
2322 * still at least one good spare block available.
2323 */
2324static int nmbm_erase_logic_block(struct nmbm_instance *ni, uint32_t block_addr)
2325{
2326 uint32_t pb;
2327 bool success;
2328
2329retry:
2330 /* Map logic block to physical block */
2331 pb = ni->block_mapping[block_addr];
2332
2333 /* Whether the logic block is good (has valid mapping) */
2334 if ((int32_t)pb < 0) {
2335 nlog_debug(ni, "Logic block %u is a bad block\n", block_addr);
2336 return -EIO;
2337 }
2338
2339 /* Remap logic block if current physical block is a bad block */
2340 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD ||
2341 nmbm_get_block_state(ni, pb) == BLOCK_ST_NEED_REMAP)
2342 goto remap_logic_block;
developer4f9017d2021-06-16 17:18:47 +08002343
2344 /* Insurance to detect unexpected bad block marked by user */
2345 if (nmbm_check_bad_phys_block(ni, pb)) {
2346 nlog_warn(ni, "Found unexpected bad block possibly marked by user\n");
2347 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2348 goto remap_logic_block;
2349 }
developer8d16ac22021-05-26 15:32:12 +08002350
2351 success = nmbm_erase_phys_block(ni, ba2addr(ni, pb));
2352 if (success)
2353 return 0;
2354
2355 /* Mark bad block */
2356 nmbm_mark_phys_bad_block(ni, pb);
2357 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2358
2359remap_logic_block:
2360 /* Try to assign a new block */
2361 success = nmbm_map_block(ni, block_addr);
2362 if (!success) {
2363 /* Mark logic block unusable, and update info table */
2364 ni->block_mapping[block_addr] = -1;
2365 if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2366 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2367 nmbm_update_info_table(ni);
2368 return -EIO;
2369 }
2370
2371 /* Update info table before erasing */
2372 if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2373 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2374 nmbm_update_info_table(ni);
2375
2376 goto retry;
2377}
2378
2379/*
2380 * nmbm_erase_block_range - Erase logic blocks
2381 * @ni: NMBM instance structure
2382 * @addr: logic linear address
2383 * @size: erase range
2384 * @failed_addr: return failed block address if error occurs
2385 */
2386int nmbm_erase_block_range(struct nmbm_instance *ni, uint64_t addr,
2387 uint64_t size, uint64_t *failed_addr)
2388{
2389 uint32_t start_ba, end_ba;
2390 int ret;
2391
2392 if (!ni)
2393 return -EINVAL;
2394
2395 /* Sanity check */
2396 if (ni->protected) {
2397 nlog_debug(ni, "Device is forced read-only\n");
2398 return -EROFS;
2399 }
2400
2401 if (addr >= ba2addr(ni, ni->data_block_count)) {
2402 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2403 return -EINVAL;
2404 }
2405
2406 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2407 nlog_err(ni, "Erase range 0xllxu is too large\n", size);
2408 return -EINVAL;
2409 }
2410
2411 if (!size) {
2412 nlog_warn(ni, "No blocks to be erased\n");
2413 return 0;
2414 }
2415
2416 start_ba = addr2ba(ni, addr);
2417 end_ba = addr2ba(ni, addr + size - 1);
2418
2419 while (start_ba <= end_ba) {
2420 WATCHDOG_RESET();
2421
2422 ret = nmbm_erase_logic_block(ni, start_ba);
2423 if (ret) {
2424 if (failed_addr)
2425 *failed_addr = ba2addr(ni, start_ba);
2426 return ret;
2427 }
2428
2429 start_ba++;
2430 }
2431
2432 return 0;
2433}
2434
2435/*
2436 * nmbm_read_logic_page - Read page based on logic address
2437 * @ni: NMBM instance structure
2438 * @addr: logic linear address
2439 * @data: buffer to store main data. optional.
2440 * @oob: buffer to store oob data. optional.
2441 * @mode: read mode
2442 */
2443static int nmbm_read_logic_page(struct nmbm_instance *ni, uint64_t addr,
2444 void *data, void *oob, enum nmbm_oob_mode mode)
2445{
2446 uint32_t lb, pb, offset;
2447 uint64_t paddr;
2448 int ret;
2449
2450 /* Extract block address and in-block offset */
2451 lb = addr2ba(ni, addr);
2452 offset = addr & ni->erasesize_mask;
2453
2454 /* Map logic block to physical block */
2455 pb = ni->block_mapping[lb];
2456
2457 /* Whether the logic block is good (has valid mapping) */
2458 if ((int32_t)pb < 0) {
2459 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2460 return -EIO;
2461 }
2462
2463 /* Fail if physical block is marked bad */
2464 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2465 return -EIO;
2466
2467 /* Assemble new address */
2468 paddr = ba2addr(ni, pb) + offset;
2469
2470 ret = nmbm_read_phys_page(ni, paddr, data, oob, mode);
2471 if (!ret)
2472 return 0;
2473
2474 /* For ECC error, return positive value only */
2475 if (ret > 0)
2476 return 1;
2477
2478 /*
2479 * Do not remap bad block here. Just mark this block in state table.
2480 * Remap this block on erasing.
2481 */
2482 nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2483 nmbm_update_info_table(ni);
2484
2485 return -EIO;
2486}
2487
2488/*
2489 * nmbm_read_single_page - Read one page based on logic address
2490 * @ni: NMBM instance structure
2491 * @addr: logic linear address
2492 * @data: buffer to store main data. optional.
2493 * @oob: buffer to store oob data. optional.
2494 * @mode: read mode
2495 */
2496int nmbm_read_single_page(struct nmbm_instance *ni, uint64_t addr, void *data,
2497 void *oob, enum nmbm_oob_mode mode)
2498{
2499 if (!ni)
2500 return -EINVAL;
2501
2502 /* Sanity check */
2503 if (ni->protected) {
2504 nlog_debug(ni, "Device is forced read-only\n");
2505 return -EROFS;
2506 }
2507
2508 if (addr >= ba2addr(ni, ni->data_block_count)) {
2509 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2510 return -EINVAL;
2511 }
2512
2513 return nmbm_read_logic_page(ni, addr, data, oob, mode);
2514}
2515
2516/*
2517 * nmbm_read_range - Read data without oob
2518 * @ni: NMBM instance structure
2519 * @addr: logic linear address
2520 * @size: data size to read
2521 * @data: buffer to store main data to be read
2522 * @mode: read mode
2523 * @retlen: return actual data size read
2524 */
2525int nmbm_read_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2526 void *data, enum nmbm_oob_mode mode, size_t *retlen)
2527{
2528 uint64_t off = addr;
2529 uint8_t *ptr = data;
2530 size_t sizeremain = size, chunksize, leading;
2531 int ret;
2532
2533 if (!ni)
2534 return -EINVAL;
2535
2536 /* Sanity check */
2537 if (ni->protected) {
2538 nlog_debug(ni, "Device is forced read-only\n");
2539 return -EROFS;
2540 }
2541
2542 if (addr >= ba2addr(ni, ni->data_block_count)) {
2543 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2544 return -EINVAL;
2545 }
2546
2547 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2548 nlog_err(ni, "Read range 0x%llx is too large\n", size);
2549 return -EINVAL;
2550 }
2551
2552 if (!size) {
2553 nlog_warn(ni, "No data to be read\n");
2554 return 0;
2555 }
2556
2557 while (sizeremain) {
2558 WATCHDOG_RESET();
2559
2560 leading = off & ni->writesize_mask;
2561 chunksize = ni->lower.writesize - leading;
2562 if (chunksize > sizeremain)
2563 chunksize = sizeremain;
2564
2565 if (chunksize == ni->lower.writesize) {
2566 ret = nmbm_read_logic_page(ni, off - leading, ptr,
2567 NULL, mode);
2568 if (ret)
2569 break;
2570 } else {
2571 ret = nmbm_read_logic_page(ni, off - leading,
2572 ni->page_cache, NULL,
2573 mode);
2574 if (ret)
2575 break;
2576
2577 memcpy(ptr, ni->page_cache + leading, chunksize);
2578 }
2579
2580 off += chunksize;
2581 ptr += chunksize;
2582 sizeremain -= chunksize;
2583 }
2584
2585 if (retlen)
2586 *retlen = size - sizeremain;
2587
2588 return ret;
2589}
2590
2591/*
2592 * nmbm_write_logic_page - Read page based on logic address
2593 * @ni: NMBM instance structure
2594 * @addr: logic linear address
2595 * @data: buffer contains main data. optional.
2596 * @oob: buffer contains oob data. optional.
2597 * @mode: write mode
2598 */
2599static int nmbm_write_logic_page(struct nmbm_instance *ni, uint64_t addr,
2600 const void *data, const void *oob,
2601 enum nmbm_oob_mode mode)
2602{
2603 uint32_t lb, pb, offset;
2604 uint64_t paddr;
2605 bool success;
2606
2607 /* Extract block address and in-block offset */
2608 lb = addr2ba(ni, addr);
2609 offset = addr & ni->erasesize_mask;
2610
2611 /* Map logic block to physical block */
2612 pb = ni->block_mapping[lb];
2613
2614 /* Whether the logic block is good (has valid mapping) */
2615 if ((int32_t)pb < 0) {
2616 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2617 return -EIO;
2618 }
2619
2620 /* Fail if physical block is marked bad */
2621 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2622 return -EIO;
2623
2624 /* Assemble new address */
2625 paddr = ba2addr(ni, pb) + offset;
2626
2627 success = nmbm_write_phys_page(ni, paddr, data, oob, mode);
2628 if (success)
2629 return 0;
2630
2631 /*
2632 * Do not remap bad block here. Just mark this block in state table.
2633 * Remap this block on erasing.
2634 */
2635 nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2636 nmbm_update_info_table(ni);
2637
2638 return -EIO;
2639}
2640
2641/*
2642 * nmbm_write_single_page - Write one page based on logic address
2643 * @ni: NMBM instance structure
2644 * @addr: logic linear address
2645 * @data: buffer contains main data. optional.
2646 * @oob: buffer contains oob data. optional.
2647 * @mode: write mode
2648 */
2649int nmbm_write_single_page(struct nmbm_instance *ni, uint64_t addr,
2650 const void *data, const void *oob,
2651 enum nmbm_oob_mode mode)
2652{
2653 if (!ni)
2654 return -EINVAL;
2655
2656 /* Sanity check */
2657 if (ni->protected) {
2658 nlog_debug(ni, "Device is forced read-only\n");
2659 return -EROFS;
2660 }
2661
2662 if (addr >= ba2addr(ni, ni->data_block_count)) {
2663 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2664 return -EINVAL;
2665 }
2666
2667 return nmbm_write_logic_page(ni, addr, data, oob, mode);
2668}
2669
2670/*
2671 * nmbm_write_range - Write data without oob
2672 * @ni: NMBM instance structure
2673 * @addr: logic linear address
2674 * @size: data size to write
2675 * @data: buffer contains data to be written
2676 * @mode: write mode
2677 * @retlen: return actual data size written
2678 */
2679int nmbm_write_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2680 const void *data, enum nmbm_oob_mode mode,
2681 size_t *retlen)
2682{
2683 uint64_t off = addr;
2684 const uint8_t *ptr = data;
2685 size_t sizeremain = size, chunksize, leading;
2686 int ret;
2687
2688 if (!ni)
2689 return -EINVAL;
2690
2691 /* Sanity check */
2692 if (ni->protected) {
2693 nlog_debug(ni, "Device is forced read-only\n");
2694 return -EROFS;
2695 }
2696
2697 if (addr >= ba2addr(ni, ni->data_block_count)) {
2698 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2699 return -EINVAL;
2700 }
2701
2702 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2703 nlog_err(ni, "Write size 0x%zx is too large\n", size);
2704 return -EINVAL;
2705 }
2706
2707 if (!size) {
2708 nlog_warn(ni, "No data to be written\n");
2709 return 0;
2710 }
2711
2712 while (sizeremain) {
2713 WATCHDOG_RESET();
2714
2715 leading = off & ni->writesize_mask;
2716 chunksize = ni->lower.writesize - leading;
2717 if (chunksize > sizeremain)
2718 chunksize = sizeremain;
2719
2720 if (chunksize == ni->lower.writesize) {
2721 ret = nmbm_write_logic_page(ni, off - leading, ptr,
2722 NULL, mode);
2723 if (ret)
2724 break;
2725 } else {
2726 memset(ni->page_cache, 0xff, leading);
2727 memcpy(ni->page_cache + leading, ptr, chunksize);
2728
2729 ret = nmbm_write_logic_page(ni, off - leading,
2730 ni->page_cache, NULL,
2731 mode);
2732 if (ret)
2733 break;
2734 }
2735
2736 off += chunksize;
2737 ptr += chunksize;
2738 sizeremain -= chunksize;
2739 }
2740
2741 if (retlen)
2742 *retlen = size - sizeremain;
2743
2744 return ret;
2745}
2746
2747/*
2748 * nmbm_check_bad_block - Check whether a logic block is usable
2749 * @ni: NMBM instance structure
2750 * @addr: logic linear address
2751 */
2752int nmbm_check_bad_block(struct nmbm_instance *ni, uint64_t addr)
2753{
2754 uint32_t lb, pb;
2755
2756 if (!ni)
2757 return -EINVAL;
2758
2759 if (addr >= ba2addr(ni, ni->data_block_count)) {
2760 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2761 return -EINVAL;
2762 }
2763
2764 lb = addr2ba(ni, addr);
2765
2766 /* Map logic block to physical block */
2767 pb = ni->block_mapping[lb];
2768
2769 if ((int32_t)pb < 0)
2770 return 1;
2771
2772 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2773 return 1;
2774
2775 return 0;
2776}
2777
2778/*
2779 * nmbm_mark_bad_block - Mark a logic block unusable
2780 * @ni: NMBM instance structure
2781 * @addr: logic linear address
2782 */
2783int nmbm_mark_bad_block(struct nmbm_instance *ni, uint64_t addr)
2784{
2785 uint32_t lb, pb;
2786
2787 if (!ni)
2788 return -EINVAL;
2789
2790 if (addr >= ba2addr(ni, ni->data_block_count)) {
2791 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2792 return -EINVAL;
2793 }
2794
2795 lb = addr2ba(ni, addr);
2796
2797 /* Map logic block to physical block */
2798 pb = ni->block_mapping[lb];
2799
2800 if ((int32_t)pb < 0)
2801 return 0;
2802
2803 ni->block_mapping[lb] = -1;
2804 nmbm_mark_phys_bad_block(ni, pb);
2805 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2806 nmbm_update_info_table(ni);
2807
2808 return 0;
2809}
2810
2811/*
2812 * nmbm_get_avail_size - Get available user data size
2813 * @ni: NMBM instance structure
2814 */
2815uint64_t nmbm_get_avail_size(struct nmbm_instance *ni)
2816{
2817 if (!ni)
2818 return 0;
2819
2820 return (uint64_t)ni->data_block_count << ni->erasesize_shift;
2821}
2822
2823/*
2824 * nmbm_get_lower_device - Get lower device structure
2825 * @ni: NMBM instance structure
2826 * @nld: pointer to hold the data of lower device structure
2827 */
2828int nmbm_get_lower_device(struct nmbm_instance *ni, struct nmbm_lower_device *nld)
2829{
2830 if (!ni)
2831 return -EINVAL;
2832
2833 if (nld)
2834 memcpy(nld, &ni->lower, sizeof(*nld));
2835
2836 return 0;
2837}
2838
2839#include "nmbm-debug.inl"