1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2021 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 */
7
8#include "nmbm-private.h"
9
10#include "nmbm-debug.h"
11
12#define NMBM_VER_MAJOR 1
13#define NMBM_VER_MINOR 0
14#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
15 NMBM_VER_MINOR)
16
17#define NMBM_ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))
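/*
 * Illustrative note (values chosen only for the example): NMBM_ALIGN() rounds
 * @v up to the next multiple of @a and assumes @a is a power of two. For
 * example, NMBM_ALIGN(5000, 4096) == 8192, and NMBM_ALIGN(8192, 4096) stays
 * 8192.
 */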
18
19/*****************************************************************************/
20/* Logging related functions */
21/*****************************************************************************/
22
23/*
24 * nmbm_log_lower - Print log using OS specific routine
25 * @nld: NMBM lower device structure
26 * @level: log level
27 * @fmt: format string
28 */
29static void nmbm_log_lower(struct nmbm_lower_device *nld,
30 enum nmbm_log_category level, const char *fmt, ...)
31{
32 va_list ap;
33
34 if (!nld->logprint)
35 return;
36
37 va_start(ap, fmt);
38 nld->logprint(nld->arg, level, fmt, ap);
39 va_end(ap);
40}
41
42/*
43 * nmbm_log - Print log using OS specific routine
44 * @ni: NMBM instance structure
45 * @level: log level
46 * @fmt: format string
47 */
48static void nmbm_log(struct nmbm_instance *ni, enum nmbm_log_category level,
49 const char *fmt, ...)
50{
51 va_list ap;
52
53 if (!ni)
54 return;
55
56 if (!ni->lower.logprint || level < ni->log_display_level)
57 return;
58
59 va_start(ap, fmt);
60 ni->lower.logprint(ni->lower.arg, level, fmt, ap);
61 va_end(ap);
62}
63
64/*
65 * nmbm_set_log_level - Set log display level
66 * @ni: NMBM instance structure
67 * @level: log display level
68 */
69enum nmbm_log_category nmbm_set_log_level(struct nmbm_instance *ni,
70 enum nmbm_log_category level)
71{
72 enum nmbm_log_category old;
73
74 if (!ni)
75 return __NMBM_LOG_MAX;
76
77 old = ni->log_display_level;
78 ni->log_display_level = level;
79 return old;
80}
81
82/*
83 * nlog_table_creation - Print log of table creation event
84 * @ni: NMBM instance structure
85 * @main_table: whether the table is main info table
86 * @start_ba: start block address of the table
87 * @end_ba: block address after the end of the table
88 */
89static void nlog_table_creation(struct nmbm_instance *ni, bool main_table,
90 uint32_t start_ba, uint32_t end_ba)
91{
92 if (start_ba == end_ba - 1)
93 nlog_info(ni, "%s info table has been written to block %u\n",
94 main_table ? "Main" : "Backup", start_ba);
95 else
96 nlog_info(ni, "%s info table has been written to block %u-%u\n",
97 main_table ? "Main" : "Backup", start_ba, end_ba - 1);
98
99 nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
100}
101
102/*
103 * nlog_table_update - Print log of table update event
104 * @ni: NMBM instance structure
105 * @main_table: whether the table is main info table
106 * @start_ba: start block address of the table
107 * @end_ba: block address after the end of the table
108 */
109static void nlog_table_update(struct nmbm_instance *ni, bool main_table,
110 uint32_t start_ba, uint32_t end_ba)
111{
112 if (start_ba == end_ba - 1)
113 nlog_debug(ni, "%s info table has been updated in block %u\n",
114 main_table ? "Main" : "Backup", start_ba);
115 else
116 nlog_debug(ni, "%s info table has been updated in block %u-%u\n",
117 main_table ? "Main" : "Backup", start_ba, end_ba - 1);
118
119 nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
120}
121
122/*
123 * nlog_table_found - Print log of table found event
124 * @ni: NMBM instance structure
125 * @first_table: whether the table is first found info table
126 * @write_count: write count of the info table
127 * @start_ba: start block address of the table
128 * @end_ba: block address after the end of the table
129 */
130static void nlog_table_found(struct nmbm_instance *ni, bool first_table,
131 uint32_t write_count, uint32_t start_ba,
132 uint32_t end_ba)
133{
134 if (start_ba == end_ba - 1)
135 nlog_info(ni, "%s info table with writecount %u found in block %u\n",
136 first_table ? "First" : "Second", write_count,
137 start_ba);
138 else
139 nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n",
140 first_table ? "First" : "Second", write_count,
141 start_ba, end_ba - 1);
142
143 nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
144}
145
146/*****************************************************************************/
147/* Address conversion functions */
148/*****************************************************************************/
149
150/*
151 * addr2ba - Convert a linear address to block address
152 * @ni: NMBM instance structure
153 * @addr: Linear address
154 */
155static uint32_t addr2ba(struct nmbm_instance *ni, uint64_t addr)
156{
157 return addr >> ni->erasesize_shift;
158}
159
160/*
161 * ba2addr - Convert a block address to linear address
162 * @ni: NMBM instance structure
163 * @ba: Block address
164 */
165static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba)
166{
167 return (uint64_t)ba << ni->erasesize_shift;
168}
169/*
170 * size2blk - Get the minimum number of blocks required to store a given data size
171 * @ni: NMBM instance structure
172 * @size: data size to be stored
173 */
174static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size)
175{
176 return (size + ni->lower.erasesize - 1) >> ni->erasesize_shift;
177}
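/*
 * Worked example (geometry assumed for illustration): with a 128 KiB erase
 * block, i.e. erasesize_shift == 17,
 *   addr2ba(ni, 0x240000) == 18,
 *   ba2addr(ni, 18) == 0x240000,
 *   size2blk(ni, 200000) == 2 (200000 bytes round up to two 131072-byte blocks).
 */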
178
179/*****************************************************************************/
180/* High level NAND chip APIs */
181/*****************************************************************************/
182
183/*
184 * nmbm_reset_chip - Reset NAND device
185 * @ni: NMBM instance structure
186 */
187static void nmbm_reset_chip(struct nmbm_instance *ni)
188{
189 if (ni->lower.reset_chip)
190 ni->lower.reset_chip(ni->lower.arg);
191}
192
193/*
194 * nmbm_read_phys_page - Read page with retry
195 * @ni: NMBM instance structure
196 * @addr: linear address where the data will be read from
197 * @data: the main data to be read
198 * @oob: the oob data to be read
199 * @mode: mode for processing oob data
200 *
201 * Read a page for at most NMBM_TRY_COUNT times.
202 *
203 * Return 0 for success, a positive value for the corrected bitflip count,
204 * -EBADMSG for an ECC error, and other negative values for other errors.
205 */
206static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr,
207 void *data, void *oob, enum nmbm_oob_mode mode)
208{
209 int tries, ret;
210
211 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
212 ret = ni->lower.read_page(ni->lower.arg, addr, data, oob, mode);
213 if (ret >= 0)
214 return ret;
215
216 nmbm_reset_chip(ni);
217 }
218
219 if (ret != -EBADMSG)
220 nlog_err(ni, "Page read failed at address 0x%08llx\n", addr);
221
222 return ret;
223}
224
225/*
226 * nmbm_write_phys_page - Write page with retry
227 * @ni: NMBM instance structure
228 * @addr: linear address where the data will be written to
229 * @data: the main data to be written
230 * @oob: the oob data to be written
231 * @mode: mode for processing oob data
232 *
233 * Write a page for at most NMBM_TRY_COUNT times.
234 */
235static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
236 const void *data, const void *oob,
237 enum nmbm_oob_mode mode)
238{
239 int tries, ret;
240
241 if (ni->lower.flags & NMBM_F_READ_ONLY) {
242 nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
243 return false;
244 }
245
246 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
247 ret = ni->lower.write_page(ni->lower.arg, addr, data, oob, mode);
248 if (!ret)
249 return true;
250
251 nmbm_reset_chip(ni);
252 }
253
254 nlog_err(ni, "Page write failed at address 0x%08llx\n", addr);
255
256 return false;
257}
258
259/*
260 * nmbm_erase_phys_block - Erase a block with retry
261 * @ni: NMBM instance structure
262 * @addr: Linear address
263 *
264 * Erase a block for at most NMBM_TRY_COUNT times.
265 */
266static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr)
267{
268 int tries, ret;
269
270 if (ni->lower.flags & NMBM_F_READ_ONLY) {
271 nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
272 return false;
273 }
274
275 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
276 ret = ni->lower.erase_block(ni->lower.arg, addr);
277 if (!ret)
278 return true;
279
280 nmbm_reset_chip(ni);
281 }
282
283 nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr);
284
285 return false;
286}
287
288/*
289 * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
290 * @ni: NMBM instance structure
291 * @ba: block address
292 */
293static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba)
294{
295 uint64_t addr = ba2addr(ni, ba);
296 int ret;
297
298 if (ni->lower.is_bad_block)
299 return ni->lower.is_bad_block(ni->lower.arg, addr);
300
301 /* Treat ECC error as read success */
302 ret = nmbm_read_phys_page(ni, addr, NULL,
303 ni->page_cache + ni->lower.writesize,
304 NMBM_MODE_RAW);
305 if (ret < 0 && ret != -EBADMSG)
306 return true;
307
308 return ni->page_cache[ni->lower.writesize] != 0xff;
309}
310
311/*
312 * nmbm_mark_phys_bad_block - Mark a block bad
313 * @ni: NMBM instance structure
314 * @ba: block address
315 */
316static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba)
317{
318 uint64_t addr = ba2addr(ni, ba);
319 enum nmbm_log_category level;
320 uint32_t off;
321
322 if (ni->lower.flags & NMBM_F_READ_ONLY) {
323 nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
324 return -1;
325 }
326
327 nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr);
328
329 if (ni->lower.mark_bad_block)
330 return ni->lower.mark_bad_block(ni->lower.arg, addr);
331
332 /* Whole page set to 0x00 */
333 memset(ni->page_cache, 0, ni->rawpage_size);
334
335 /* Write to all pages within this block, disable all errors */
336 level = nmbm_set_log_level(ni, __NMBM_LOG_MAX);
337
338 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
339 nmbm_write_phys_page(ni, addr + off, ni->page_cache,
340 ni->page_cache + ni->lower.writesize,
341 NMBM_MODE_RAW);
342 }
343
344 nmbm_set_log_level(ni, level);
345
346 return 0;
347}
348
349/*****************************************************************************/
350/* NMBM related functions */
351/*****************************************************************************/
352
353/*
354 * nmbm_check_header - Check whether a NMBM structure is valid
355 * @data: pointer to a NMBM structure with a NMBM header at beginning
356 * @size: Size of the buffer pointed by @header
357 *
358 * The size of the NMBM structure may be larger than NMBM header,
359 * e.g. block mapping table and block state table.
360 */
361static bool nmbm_check_header(const void *data, uint32_t size)
362{
363 const struct nmbm_header *header = data;
364 struct nmbm_header nhdr;
365 uint32_t new_checksum;
366
367 /*
368 * Make sure expected structure size is equal or smaller than
369 * buffer size.
370 */
371 if (header->size > size)
372 return false;
373
374 memcpy(&nhdr, data, sizeof(nhdr));
375
376 nhdr.checksum = 0;
377 new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr));
378 if (header->size > sizeof(nhdr))
379 new_checksum = nmbm_crc32(new_checksum,
380 (const uint8_t *)data + sizeof(nhdr),
381 header->size - sizeof(nhdr));
382
383 if (header->checksum != new_checksum)
384 return false;
385
386 return true;
387}
388
389/*
390 * nmbm_update_checksum - Update checksum of a NMBM structure
391 * @header: pointer to a NMBM structure with a NMBM header at beginning
392 *
393 * The size of the NMBM structure must be specified by @header->size
394 */
395static void nmbm_update_checksum(struct nmbm_header *header)
396{
397 header->checksum = 0;
398 header->checksum = nmbm_crc32(0, header, header->size);
399}
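/*
 * Usage sketch (assumed, based on the two helpers above): the checksum is a
 * CRC32 over the whole structure with the checksum field treated as zero, so
 * a writer seals a structure such as a struct nmbm_signature sig with
 *
 *	sig.header.size = sizeof(sig);
 *	nmbm_update_checksum(&sig.header);
 *
 * and a reader later validates the same bytes with
 * nmbm_check_header(&sig, sizeof(sig)).
 */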
400
401/*
402 * nmbm_get_spare_block_count - Calculate the number of spare blocks to reserve
403 * @block_count: number of data blocks
404 *
405 * Calculate the number of blocks that should be reserved for remapping data.
406 */
407static uint32_t nmbm_get_spare_block_count(uint32_t block_count)
408{
409 uint32_t val;
410
411 val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV;
412 val *= NMBM_SPARE_BLOCK_MULTI;
413
414 if (val < NMBM_SPARE_BLOCK_MIN)
415 val = NMBM_SPARE_BLOCK_MIN;
416
417 return val;
418}
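/*
 * Worked example (constant values assumed for illustration; the real ones are
 * defined in nmbm-private.h): with NMBM_SPARE_BLOCK_DIV == 8,
 * NMBM_SPARE_BLOCK_MULTI == 1 and NMBM_SPARE_BLOCK_MIN == 2, 1024 data blocks
 * get (1024 + 4) / 8 * 1 == 128 spare blocks, while a very small device is
 * clamped up to the minimum of 2.
 */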
419
420/*
421 * nmbm_get_block_state_raw - Get state of a block from raw block state table
422 * @block_state: pointer to raw block state table (bitmap)
423 * @ba: block address
424 */
425static uint32_t nmbm_get_block_state_raw(nmbm_bitmap_t *block_state,
426 uint32_t ba)
427{
428 uint32_t unit, shift;
429
430 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
431 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
432
433 return (block_state[unit] >> shift) & BLOCK_ST_MASK;
434}
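/*
 * Illustrative layout (widths assumed; the real constants come from
 * nmbm-private.h): if nmbm_bitmap_t is 32 bits wide and
 * NMBM_BITMAP_BITS_PER_BLOCK == 2, each bitmap unit packs 16 block states, so
 * the state of block 37 lives in unit 37 / 16 == 2 at bit offset
 * (37 % 16) * 2 == 10.
 */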
435
436/*
437 * nmbm_get_block_state - Get state of a block from block state table
438 * @ni: NMBM instance structure
439 * @ba: block address
440 */
441static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba)
442{
443 return nmbm_get_block_state_raw(ni->block_state, ba);
444}
445
446/*
447 * nmbm_set_block_state - Set state of a block to block state table
448 * @ni: NMBM instance structure
449 * @ba: block address
450 * @state: block state
451 *
452 * Set state of a block. If the block state changed, ni->block_state_changed
453 * will be increased.
454 */
455static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba,
456 uint32_t state)
457{
458 uint32_t unit, shift, orig;
459 nmbm_bitmap_t uv;
460
461 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
462 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
463
464 orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK;
465 state &= BLOCK_ST_MASK;
466
467 uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift));
468 uv |= state << shift;
469 ni->block_state[unit] = uv;
470
471 if (state == BLOCK_ST_BAD)
472 nmbm_mark_block_color_bad(ni, ba);
473
474 if (orig != state) {
475 ni->block_state_changed++;
476 return true;
477 }
478
479 return false;
480}
481
482/*
483 * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
484 * @ni: NMBM instance structure
485 * @ba: start physical block address
486 * @nba: return physical block address after walk
487 * @count: number of good blocks to be skipped
488 * @limit: highest block address allowed for walking
489 *
490 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
491 * return the next good block address.
492 *
493 * If not enough good blocks are counted before @limit is reached, false will be returned.
494 *
495 * If @count == 0, the nearest good block address will be returned.
496 * @limit is not counted in walking.
497 */
498static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba,
499 uint32_t *nba, uint32_t count,
500 uint32_t limit)
501{
502 int32_t nblock = count;
503
504 if (limit >= ni->block_count)
505 limit = ni->block_count - 1;
506
507 while (ba < limit) {
508 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
509 nblock--;
510
511 if (nblock < 0) {
512 *nba = ba;
513 return true;
514 }
515
516 ba++;
517 }
518
519 return false;
520}
521
522/*
523 * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
524 * @ni: NMBM instance structure
525 * @ba: start physical block address
526 * @nba: return physical block address after walk
527 * @count: number of good blocks to be skipped
528 * @limit: lowest block address allowed for walking
529 *
530 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
531 * return the next good block address.
532 *
533 * If not enough good blocks are counted before @limit is reached, false will be returned.
534 *
535 * If @count == 0, the nearest good block address will be returned.
536 * @limit is not counted in walking.
537 */
538static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba,
539 uint32_t *nba, uint32_t count, uint32_t limit)
540{
541 int32_t nblock = count;
542
543 if (limit >= ni->block_count)
544 limit = ni->block_count - 1;
545
546 while (ba > limit) {
547 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
548 nblock--;
549
550 if (nblock < 0) {
551 *nba = ba;
552 return true;
553 }
554
555 ba--;
556 }
557
558 return false;
559}
560
561/*
562 * nmbm_block_walk - Skip specified number of good blocks from curr. block addr
563 * @ni: NMBM instance structure
564 * @ascending: whether to walk ascending
565 * @ba: start physical block address
566 * @nba: return physical block address after walk
567 * @count: number of good blocks to be skipped
568 * @limit: highest/lowest block address allowed for walking
569 *
570 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
571 * return the next good block address.
572 *
573 * If not enough good blocks are counted before @limit is reached, false will be returned.
574 *
575 * If @count == 0, the nearest good block address will be returned.
576 * @limit can be set to a negative value if no limit is required.
577 * @limit is not counted in walking.
578 */
579static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending,
580 uint32_t ba, uint32_t *nba, int32_t count,
581 int32_t limit)
582{
583 if (ascending)
584 return nmbm_block_walk_asc(ni, ba, nba, count, limit);
585
586 return nmbm_block_walk_desc(ni, ba, nba, count, limit);
587}
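/*
 * Worked example (block states assumed for illustration): starting at
 * physical block 10 with blocks 10 and 11 good and block 12 bad,
 * nmbm_block_walk(ni, true, 10, &nba, 2, limit) skips the two good blocks 10
 * and 11, steps over the bad block 12 and returns nba == 13, the next good
 * block; with count == 0 it simply returns the first good block at or
 * after 10.
 */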
588
589/*
590 * nmbm_scan_badblocks - Scan and record all bad blocks
591 * @ni: NMBM instance structure
592 *
593 * Scan the entire lower NAND chip and record all bad blocks into the block
594 * state table.
595 */
596static void nmbm_scan_badblocks(struct nmbm_instance *ni)
597{
598 uint32_t ba;
599
600 for (ba = 0; ba < ni->block_count; ba++) {
601 if (nmbm_check_bad_phys_block(ni, ba)) {
602 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
603 nlog_info(ni, "Bad block %u [0x%08llx]\n", ba,
604 ba2addr(ni, ba));
605 }
606 }
607}
608
609/*
610 * nmbm_build_mapping_table - Build initial block mapping table
611 * @ni: NMBM instance structure
612 *
613 * The initial mapping table will be compatible with the strategy used in
614 * factory production.
615 */
616static void nmbm_build_mapping_table(struct nmbm_instance *ni)
617{
618 uint32_t pb, lb;
619
620 for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) {
621 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
622 continue;
623
624 /* Always map to the next good block */
625 ni->block_mapping[lb++] = pb;
626 }
627
628 ni->data_block_count = lb;
629
630 /* Unusable/Management blocks */
631 for (pb = lb; pb < ni->block_count; pb++)
632 ni->block_mapping[pb] = -1;
633}
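/*
 * Illustrative example (layout assumed): with mgmt_start_ba == 4 and only
 * physical block 2 bad, the loop above produces the mapping 0 -> 0, 1 -> 1,
 * 2 -> 3, sets data_block_count to 3, and fills every remaining entry
 * (including the management area) with -1, i.e. unmapped.
 */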
634
635/*
636 * nmbm_erase_block_and_check - Erase a block and check its usability
637 * @ni: NMBM instance structure
638 * @ba: block address to be erased
639 *
640 * Erase a block and check its usability.
641 *
642 * Return true if the block is usable, false if erasure fails or the block
643 * has too many bitflips.
644 */
645static bool nmbm_erase_block_and_check(struct nmbm_instance *ni, uint32_t ba)
646{
647 uint64_t addr, off;
648 bool success;
649 int ret;
650
651 success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
652 if (!success)
653 return false;
654
655 if (!(ni->lower.flags & NMBM_F_EMPTY_PAGE_ECC_OK))
656 return true;
657
658 /* Check every page to make sure there aren't too many bitflips */
659
660 addr = ba2addr(ni, ba);
661
662 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
663 WATCHDOG_RESET();
664
665 ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
666 NMBM_MODE_PLACE_OOB);
667 if (ret == -EBADMSG) {
668 /*
669 * NMBM_F_EMPTY_PAGE_ECC_OK means the empty page is
670 * still protected by ECC. So reading pages with ECC
671 * enabled and -EBADMSG means there are too many
672 * bitflips that can't be recovered, and the block
673 * containing the page should be marked bad.
674 */
675 nlog_err(ni,
676 "Too many bitflips in empty page at 0x%llx\n",
677 addr + off);
678 return false;
679 }
680 }
681
682 return true;
683}
684
685/*
686 * nmbm_erase_range - Erase a range of blocks
687 * @ni: NMBM instance structure
688 * @ba: block address where the erasure will start
689 * @limit: top block address allowed for erasure
690 *
691 * Erase blocks within the specified range. Newly-found bad blocks will be
692 * marked.
693 *
694 * @limit is not counted into the allowed erasure address.
695 */
696static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba,
697 uint32_t limit)
698{
699 bool success;
700
701 while (ba < limit) {
702 WATCHDOG_RESET();
703
704 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
705 goto next_block;
706
707 /* Insurance to detect unexpected bad block marked by user */
708 if (nmbm_check_bad_phys_block(ni, ba)) {
709 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
710 goto next_block;
711 }
712
713 success = nmbm_erase_block_and_check(ni, ba);
714 if (success)
715 goto next_block;
716
717 nmbm_mark_phys_bad_block(ni, ba);
718 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
719
720 next_block:
721 ba++;
722 }
723}
724
725/*
726 * nmbm_write_repeated_data - Write critical data to a block with retry
727 * @ni: NMBM instance structure
728 * @ba: block address where the data will be written to
729 * @data: the data to be written
730 * @size: size of the data
731 *
732 * Write data to every page of the block. Success only if all pages within
733 * this block have been successfully written.
734 *
735 * Make sure data size is not bigger than one page.
736 *
737 * This function will write and verify every page for at most
738 * NMBM_TRY_COUNT times.
739 */
740static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba,
741 const void *data, uint32_t size)
742{
743 uint64_t addr, off;
744 bool success;
745 int ret;
746
747 if (size > ni->lower.writesize)
748 return false;
749
750 addr = ba2addr(ni, ba);
751
752 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
753 WATCHDOG_RESET();
754
755 /* Prepare page data. fill 0xff to unused region */
756 memcpy(ni->page_cache, data, size);
757 memset(ni->page_cache + size, 0xff, ni->rawpage_size - size);
758
759 success = nmbm_write_phys_page(ni, addr + off, ni->page_cache,
760 NULL, NMBM_MODE_PLACE_OOB);
761 if (!success)
762 return false;
763
764 /* Verify the data just written. ECC error indicates failure */
765 ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
766 NMBM_MODE_PLACE_OOB);
767 if (ret < 0)
768 return false;
769
770 if (memcmp(ni->page_cache, data, size))
771 return false;
772 }
773
774 return true;
775}
776
777/*
778 * nmbm_write_signature - Write signature to NAND chip
779 * @ni: NMBM instance structure
780 * @limit: top block address allowed for writing
781 * @signature: the signature to be written
782 * @signature_ba: the actual block address where signature is written to
783 *
784 * Write the signature within a specific range, from the last block of the chip down to @limit.
785 * At most one block will be written.
786 *
787 * @limit is not counted into the allowed write address.
788 */
789static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit,
790 const struct nmbm_signature *signature,
791 uint32_t *signature_ba)
792{
793 uint32_t ba = ni->block_count - 1;
794 bool success;
795
796 while (ba > limit) {
797 WATCHDOG_RESET();
798
799 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
800 goto next_block;
801
802 /* Insurance to detect unexpected bad block marked by user */
803 if (nmbm_check_bad_phys_block(ni, ba)) {
804 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
805 goto next_block;
806 }
807
808 success = nmbm_erase_block_and_check(ni, ba);
809 if (!success)
810 goto skip_bad_block;
811
812 success = nmbm_write_repeated_data(ni, ba, signature,
813 sizeof(*signature));
814 if (success) {
815 *signature_ba = ba;
816 return true;
817 }
818
819 skip_bad_block:
820 nmbm_mark_phys_bad_block(ni, ba);
821 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
822
823 next_block:
824 ba--;
825 }
826
827 return false;
828}
829
830/*
831 * nmbn_read_data - Read data
832 * @ni: NMBM instance structure
833 * @addr: linear address where the data will be read from
834 * @data: the data to be read
835 * @size: the size of data
836 *
837 * Read data range.
838 * Every page will be tried for at most NMBM_TRY_COUNT times.
839 *
840 * Return 0 for success, a positive value for the corrected bitflip count,
841 * -EBADMSG for an ECC error, and other negative values for other errors.
842 */
843static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data,
844 uint32_t size)
845{
846 uint64_t off = addr;
847 uint8_t *ptr = data;
848 uint32_t sizeremain = size, chunksize, leading;
849 int ret;
850
851 while (sizeremain) {
852 WATCHDOG_RESET();
853
854 leading = off & ni->writesize_mask;
855 chunksize = ni->lower.writesize - leading;
856 if (chunksize > sizeremain)
857 chunksize = sizeremain;
858
859 if (chunksize == ni->lower.writesize) {
860 ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL,
861 NMBM_MODE_PLACE_OOB);
862 if (ret < 0)
863 return ret;
864 } else {
865 ret = nmbm_read_phys_page(ni, off - leading,
866 ni->page_cache, NULL,
867 NMBM_MODE_PLACE_OOB);
868 if (ret < 0)
869 return ret;
870
871 memcpy(ptr, ni->page_cache + leading, chunksize);
872 }
873
874 off += chunksize;
875 ptr += chunksize;
876 sizeremain -= chunksize;
877 }
878
879 return 0;
880}
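/*
 * Worked example (page size assumed for illustration): with a 2048-byte page,
 * a read starting at linear offset 0x1234 has leading == 0x1234 & 0x7ff ==
 * 564 bytes, so the first chunk copies at most 2048 - 564 == 1484 bytes
 * through the page cache; every following chunk is page-aligned and, when it
 * covers a full page, is read directly into the caller's buffer.
 */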
881
882/*
883 * nmbn_write_verify_data - Write data with validation
884 * @ni: NMBM instance structure
885 * @addr: linear address where the data will be written to
886 * @data: the data to be written
887 * @size: the size of data
888 *
889 * Write data and verify.
890 * Every page will be tried for at most NMBM_TRY_COUNT times.
891 */
892static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr,
893 const void *data, uint32_t size)
894{
895 uint64_t off = addr;
896 const uint8_t *ptr = data;
897 uint32_t sizeremain = size, chunksize, leading;
898 bool success;
899 int ret;
900
901 while (sizeremain) {
902 WATCHDOG_RESET();
903
904 leading = off & ni->writesize_mask;
905 chunksize = ni->lower.writesize - leading;
906 if (chunksize > sizeremain)
907 chunksize = sizeremain;
908
909 /* Prepare page data. fill 0xff to unused region */
910 memset(ni->page_cache, 0xff, ni->rawpage_size);
911 memcpy(ni->page_cache + leading, ptr, chunksize);
912
913 success = nmbm_write_phys_page(ni, off - leading,
914 ni->page_cache, NULL,
915 NMBM_MODE_PLACE_OOB);
916 if (!success)
917 return false;
918
919 /* Verify the data just written. ECC error indicates failure */
920 ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache,
921 NULL, NMBM_MODE_PLACE_OOB);
922 if (ret < 0)
923 return false;
924
925 if (memcmp(ni->page_cache + leading, ptr, chunksize))
926 return false;
927
928 off += chunksize;
929 ptr += chunksize;
930 sizeremain -= chunksize;
931 }
932
933 return true;
934}
935
936/*
937 * nmbm_write_mgmt_range - Write management data into NAND within a range
938 * @ni: NMBM instance structure
939 * @ba: preferred start block address for writing
940 * @limit: highest block address allowed for writing
941 * @data: the data to be written
942 * @size: the size of data
943 * @actual_start_ba: actual start block address of data
944 * @actual_end_ba: block address after the end of data
945 *
946 * @limit is not counted into the allowed write address.
947 */
948static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba,
949 uint32_t limit, const void *data,
950 uint32_t size, uint32_t *actual_start_ba,
951 uint32_t *actual_end_ba)
952{
953 const uint8_t *ptr = data;
954 uint32_t sizeremain = size, chunksize;
955 bool success;
956
957 while (sizeremain && ba < limit) {
958 WATCHDOG_RESET();
959
960 chunksize = sizeremain;
961 if (chunksize > ni->lower.erasesize)
962 chunksize = ni->lower.erasesize;
963
964 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
965 goto next_block;
966
967 /* Insurance to detect unexpected bad block marked by user */
968 if (nmbm_check_bad_phys_block(ni, ba)) {
969 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
970 goto next_block;
971 }
972
973 success = nmbm_erase_block_and_check(ni, ba);
974 if (!success)
975 goto skip_bad_block;
976
977 success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr,
978 chunksize);
979 if (!success)
980 goto skip_bad_block;
981
982 if (sizeremain == size)
983 *actual_start_ba = ba;
984
985 ptr += chunksize;
986 sizeremain -= chunksize;
987
988 goto next_block;
989
990 skip_bad_block:
991 nmbm_mark_phys_bad_block(ni, ba);
992 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
993
994 next_block:
995 ba++;
996 }
997
998 if (sizeremain)
999 return false;
1000
1001 *actual_end_ba = ba;
1002
1003 return true;
1004}
1005
1006/*
1007 * nmbm_generate_info_table_cache - Generate info table cache data
1008 * @ni: NMBM instance structure
1009 *
1010 * Generate info table cache data to be written into flash.
1011 */
1012static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni)
1013{
1014 bool changed = false;
1015
1016 memset(ni->info_table_cache, 0xff, ni->info_table_size);
1017
1018 memcpy(ni->info_table_cache + ni->info_table.state_table_off,
1019 ni->block_state, ni->state_table_size);
1020
1021 memcpy(ni->info_table_cache + ni->info_table.mapping_table_off,
1022 ni->block_mapping, ni->mapping_table_size);
1023
1024 ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE;
1025 ni->info_table.header.version = NMBM_VER;
1026 ni->info_table.header.size = ni->info_table_size;
1027
1028 if (ni->block_state_changed || ni->block_mapping_changed) {
1029 ni->info_table.write_count++;
1030 changed = true;
1031 }
1032
1033 memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table));
1034
1035 nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache);
1036
1037 return changed;
1038}
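/*
 * Sketch of the cache assembled above (offsets are taken from ni->info_table;
 * sizes are the ones computed at init time):
 *
 *	[0 .. sizeof(ni->info_table))                info table header
 *	[state_table_off .. +state_table_size)       block state bitmap
 *	[mapping_table_off .. +mapping_table_size)   block mapping table
 *	remaining bytes up to info_table_size        0xff filler
 */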
1039
1040/*
1041 * nmbm_write_info_table - Write info table into NAND within a range
1042 * @ni: NMBM instance structure
1043 * @ba: preferred start block address for writing
1044 * @limit: highest block address allowed for writing
1045 * @actual_start_ba: actual start block address of info table
1046 * @actual_end_ba: block address after the end of info table
1047 *
1048 * @limit is counted into the allowed write address.
1049 */
1050static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba,
1051 uint32_t limit, uint32_t *actual_start_ba,
1052 uint32_t *actual_end_ba)
1053{
1054 return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache,
1055 ni->info_table_size, actual_start_ba,
1056 actual_end_ba);
1057}
1058
1059/*
1060 * nmbm_mark_tables_clean - Mark info table `clean'
1061 * @ni: NMBM instance structure
1062 */
1063static void nmbm_mark_tables_clean(struct nmbm_instance *ni)
1064{
1065 ni->block_state_changed = 0;
1066 ni->block_mapping_changed = 0;
1067}
1068
1069/*
1070 * nmbm_try_reserve_blocks - Reserve blocks, compromising if necessary
1071 * @ni: NMBM instance structure
1072 * @ba: start physical block address
1073 * @nba: return physical block address after reservation
1074 * @count: number of good blocks to be skipped
1075 * @min_count: minimum number of good blocks to be skipped
1076 * @limit: highest/lowest block address allowed for walking
1077 *
1078 * Reserve the requested number of blocks. If that fails, try to reserve as many as possible.
1079 */
1080static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba,
1081 uint32_t *nba, uint32_t count,
1082 int32_t min_count, int32_t limit)
1083{
1084 int32_t nblocks = count;
1085 bool success;
1086
1087 while (nblocks >= min_count) {
1088 success = nmbm_block_walk(ni, true, ba, nba, nblocks, limit);
1089 if (success)
1090 return true;
1091
1092 nblocks--;
1093 }
1094
1095 return false;
1096}
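/*
 * Illustrative behaviour (numbers assumed): asked for count == 4 spare blocks
 * with min_count == 0, the loop above first tries to walk past 4 good blocks;
 * if the region up to @limit is too small it retries with 3, 2, 1 and finally
 * 0, so the caller gets the largest reservation that still fits instead of an
 * outright failure.
 */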
1097
1098/*
1099 * nmbm_rebuild_info_table - Build main & backup info table from scratch
1100 * @ni: NMBM instance structure
1102 */
1103static bool nmbm_rebuild_info_table(struct nmbm_instance *ni)
1104{
1105 uint32_t table_start_ba, table_end_ba, next_start_ba;
1106 uint32_t main_table_end_ba;
1107 bool success;
1108
1109 /* Set initial value */
1110 ni->main_table_ba = 0;
1111 ni->backup_table_ba = 0;
1112 ni->mapping_blocks_ba = ni->mapping_blocks_top_ba;
1113
1114 /* Write main table */
1115 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1116 ni->mapping_blocks_top_ba,
1117 &table_start_ba, &table_end_ba);
1118 if (!success) {
1119 /* Failed to write main table, data will be lost */
1120 nlog_emerg(ni, "Unable to write at least one info table!\n");
1121 nlog_emerg(ni, "Please save your data before power off!\n");
1122 ni->protected = 1;
1123 return false;
1124 }
1125
1126 /* Main info table is successfully written, record its offset */
1127 ni->main_table_ba = table_start_ba;
1128 main_table_end_ba = table_end_ba;
1129
1130 /* Adjust mapping_blocks_ba */
1131 ni->mapping_blocks_ba = table_end_ba;
1132
1133 nmbm_mark_tables_clean(ni);
1134
1135 nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1136
1137 /* Reserve spare blocks for main info table. */
1138 success = nmbm_try_reserve_blocks(ni, table_end_ba,
1139 &next_start_ba,
1140 ni->info_table_spare_blocks, 0,
1141 ni->mapping_blocks_top_ba -
1142 size2blk(ni, ni->info_table_size));
1143 if (!success) {
1144 /* There is no spare block. */
1145 nlog_debug(ni, "No room for backup info table\n");
1146 return true;
1147 }
1148
1149 /* Write backup info table. */
1150 success = nmbm_write_info_table(ni, next_start_ba,
1151 ni->mapping_blocks_top_ba,
1152 &table_start_ba, &table_end_ba);
1153 if (!success) {
1154 /* There are not enough blocks for the backup table. */
1155 nlog_debug(ni, "No room for backup info table\n");
1156 return true;
1157 }
1158
1159 /* Backup table is successfully written, record its offset */
1160 ni->backup_table_ba = table_start_ba;
1161
1162 /* Adjust mapping_blocks_off */
1163 ni->mapping_blocks_ba = table_end_ba;
1164
1165 /* Erase spare blocks of main table to clean possible interference data */
1166 nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
1167
1168 nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1169
1170 return true;
1171}
1172
1173/*
1174 * nmbm_rescue_single_info_table - Rescue when there is only one info table
1175 * @ni: NMBM instance structure
1176 *
1177 * This function is called when only one info table exists.
1178 * This function may fail if we can't write a new info table.
1179 */
1180static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni)
1181{
1182 uint32_t table_start_ba, table_end_ba, write_ba;
1183 bool success;
1184
1185 /* Try to write new info table in front of existing table */
1186 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1187 ni->main_table_ba,
1188 &table_start_ba,
1189 &table_end_ba);
1190 if (success) {
1191 /*
1192 * New table becomes the main table, existing table becomes
1193 * the backup table.
1194 */
1195 ni->backup_table_ba = ni->main_table_ba;
1196 ni->main_table_ba = table_start_ba;
1197
1198 nmbm_mark_tables_clean(ni);
1199
1200 /* Erase spare blocks of main table to clean possible interference data */
1201 nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba);
1202
1203 nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1204
1205 return true;
1206 }
1207
1208 /* Try to reserve spare blocks for existing table */
1209 success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1210 ni->info_table_spare_blocks, 0,
1211 ni->mapping_blocks_top_ba -
1212 size2blk(ni, ni->info_table_size));
1213 if (!success) {
1214 nlog_warn(ni, "Failed to rescue single info table\n");
1215 return false;
1216 }
1217
1218 /* Try to write new info table next to the existing table */
1219 while (write_ba >= ni->mapping_blocks_ba) {
1220 WATCHDOG_RESET();
1221
1222 success = nmbm_write_info_table(ni, write_ba,
1223 ni->mapping_blocks_top_ba,
1224 &table_start_ba,
1225 &table_end_ba);
1226 if (success)
1227 break;
1228
1229 write_ba--;
1230 }
1231
1232 if (success) {
1233 /* Erase spare blocks of main table to clean possible interference data */
1234 nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba);
1235
1236 /* New table becomes the backup table */
1237 ni->backup_table_ba = table_start_ba;
1238 ni->mapping_blocks_ba = table_end_ba;
1239
1240 nmbm_mark_tables_clean(ni);
1241
1242 nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1243
1244 return true;
1245 }
1246
1247 nlog_warn(ni, "Failed to rescue single info table\n");
1248 return false;
1249}
1250
1251/*
1252 * nmbm_update_single_info_table - Update one specific info table
1253 * @ni: NMBM instance structure
1254 */
1255static bool nmbm_update_single_info_table(struct nmbm_instance *ni,
1256 bool update_main_table)
1257{
1258 uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba;
1259 bool success;
1260
1261 /* Determine the write range */
1262 if (update_main_table) {
1263 write_start_ba = ni->main_table_ba;
1264 write_limit = ni->backup_table_ba;
1265 } else {
1266 write_start_ba = ni->backup_table_ba;
1267 write_limit = ni->mapping_blocks_top_ba;
1268 }
1269
1270 nmbm_mark_block_color_mgmt(ni, write_start_ba, write_limit - 1);
1271
1272 success = nmbm_write_info_table(ni, write_start_ba, write_limit,
1273 &table_start_ba, &table_end_ba);
1274 if (success) {
1275 if (update_main_table) {
1276 ni->main_table_ba = table_start_ba;
1277 } else {
1278 ni->backup_table_ba = table_start_ba;
1279 ni->mapping_blocks_ba = table_end_ba;
1280 }
1281
1282 nmbm_mark_tables_clean(ni);
1283
1284 nlog_table_update(ni, update_main_table, table_start_ba,
1285 table_end_ba);
1286
1287 return true;
1288 }
1289
1290 if (update_main_table) {
1291 /*
1292 * If failed to update main table, make backup table the new
1293 * main table, and call nmbm_rescue_single_info_table()
1294 */
1295 nlog_warn(ni, "Unable to update %s info table\n",
1296 update_main_table ? "Main" : "Backup");
1297
1298 ni->main_table_ba = ni->backup_table_ba;
1299 ni->backup_table_ba = 0;
1300 return nmbm_rescue_single_info_table(ni);
1301 }
1302
1303 /* Only one table left */
1304 ni->mapping_blocks_ba = ni->backup_table_ba;
1305 ni->backup_table_ba = 0;
1306
1307 return false;
1308}
1309
1310/*
1311 * nmbm_rescue_main_info_table - Rescue when failed to write main info table
1312 * @ni: NMBM instance structure
1313 *
1314 * This function is called when main info table failed to be written, and
1315 * backup info table exists.
1316 */
1317static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni)
1318{
1319 uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba;
1320 uint32_t main_table_end_ba, write_ba;
1321 uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size);
1322 bool success;
1323
1324 /* Try to reserve spare blocks for existing backup info table */
1325 success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1326 ni->info_table_spare_blocks, 0,
1327 ni->mapping_blocks_top_ba -
1328 info_table_erasesize);
1329 if (!success) {
1330 /* There is no spare block. Backup info table becomes the main table. */
1331 nlog_err(ni, "No room for temporary info table\n");
1332 ni->main_table_ba = ni->backup_table_ba;
1333 ni->backup_table_ba = 0;
1334 return true;
1335 }
1336
1337 /* Try to write temporary info table into spare unmapped blocks */
1338 while (write_ba >= ni->mapping_blocks_ba) {
1339 WATCHDOG_RESET();
1340
1341 success = nmbm_write_info_table(ni, write_ba,
1342 ni->mapping_blocks_top_ba,
1343 &tmp_table_start_ba,
1344 &tmp_table_end_ba);
1345 if (success)
1346 break;
1347
1348 write_ba--;
1349 }
1350
1351 if (!success) {
1352 /* Backup info table becomes the main table */
1353 nlog_err(ni, "Failed to update main info table\n");
1354 ni->main_table_ba = ni->backup_table_ba;
1355 ni->backup_table_ba = 0;
1356 return true;
1357 }
1358
1359 /* Adjust mapping_blocks_off */
1360 ni->mapping_blocks_ba = tmp_table_end_ba;
1361
1362 nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1363 tmp_table_end_ba - 1);
1364
1365 /*
1366 * Now write main info table at the beginning of management area.
1367 * This operation will generally destroy the original backup info
1368 * table.
1369 */
1370 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1371 tmp_table_start_ba,
1372 &main_table_start_ba,
1373 &main_table_end_ba);
1374 if (!success) {
1375 /* Temporary info table becomes the main table */
1376 ni->main_table_ba = tmp_table_start_ba;
1377 ni->backup_table_ba = 0;
1378
1379 nmbm_mark_tables_clean(ni);
1380
1381 nlog_err(ni, "Failed to update main info table\n");
1382 nmbm_mark_block_color_info_table(ni, tmp_table_start_ba,
1383 tmp_table_end_ba - 1);
1384
1385 return true;
1386 }
1387
1388 /* Main info table has been successfully written, record its offset */
1389 ni->main_table_ba = main_table_start_ba;
1390
1391 nmbm_mark_tables_clean(ni);
1392
1393 nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba);
1394
1395 /*
1396 * Temporary info table becomes the new backup info table if it's
1397 * not overwritten.
1398 */
1399 if (main_table_end_ba <= tmp_table_start_ba) {
1400 ni->backup_table_ba = tmp_table_start_ba;
1401
1402 nlog_table_creation(ni, false, tmp_table_start_ba,
1403 tmp_table_end_ba);
1404
1405 return true;
1406 }
1407
1408 /* Adjust mapping_blocks_off */
1409 ni->mapping_blocks_ba = main_table_end_ba;
1410
1411 /* Try to reserve spare blocks for new main info table */
1412 success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba,
1413 ni->info_table_spare_blocks, 0,
1414 ni->mapping_blocks_top_ba -
1415 info_table_erasesize);
1416 if (!success) {
1417 /* There is no spare block. Only main table exists. */
1418 nlog_err(ni, "No room for backup info table\n");
1419 ni->backup_table_ba = 0;
1420 return true;
1421 }
1422
1423 /* Write new backup info table. */
1424 while (write_ba >= main_table_end_ba) {
1425 WATCHDOG_RESET();
1426
1427 success = nmbm_write_info_table(ni, write_ba,
1428 ni->mapping_blocks_top_ba,
1429 &tmp_table_start_ba,
1430 &tmp_table_end_ba);
1431 if (success)
1432 break;
1433
1434 write_ba--;
1435 }
1436
1437 if (!success) {
1438 nlog_err(ni, "No room for backup info table\n");
1439 ni->backup_table_ba = 0;
1440 return true;
1441 }
1442
1443 /* Backup info table has been successfully written, record its offset */
1444 ni->backup_table_ba = tmp_table_start_ba;
1445
1446 /* Adjust mapping_blocks_off */
1447 ni->mapping_blocks_ba = tmp_table_end_ba;
1448
1449 /* Erase spare blocks of main table to clean possible interference data */
1450 nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
1451
1452 nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba);
1453
1454 return true;
1455}
1456
1457/*
1458 * nmbm_update_info_table_once - Update info table once
1459 * @ni: NMBM instance structure
1460 * @force: force update
1461 *
1462 * Update both main and backup info table. Return true if at least one info
1463 * table has been successfully written.
1464 * This function only tries to update the info table once, regardless of the result.
1465 */
1466static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force)
1467{
1468 uint32_t table_start_ba, table_end_ba;
1469 uint32_t main_table_limit;
1470 bool success;
1471
1472 /* Do nothing if there is no change */
1473 if (!nmbm_generate_info_table_cache(ni) && !force)
1474 return true;
1475
1476 /* Check whether both two tables exist */
1477 if (!ni->backup_table_ba) {
1478 main_table_limit = ni->mapping_blocks_top_ba;
1479 goto write_main_table;
1480 }
1481
1482 nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1483 ni->mapping_blocks_ba - 1);
1484
1485 /*
1486 * Write backup info table in its current range.
1487 * Note that the limit is set to mapping_blocks_top_ba to provide as many
1488 * spare blocks as possible for the backup table. If, in the end, unmapped
1489 * blocks are used by the backup table, mapping_blocks_ba will be
1490 * adjusted.
1491 */
1492 success = nmbm_write_info_table(ni, ni->backup_table_ba,
1493 ni->mapping_blocks_top_ba,
1494 &table_start_ba, &table_end_ba);
1495 if (!success) {
1496 /*
1497 * There is nothing to do if failed to write backup table.
1498 * Write the main table now.
1499 */
1500 nlog_err(ni, "No room for backup table\n");
1501 ni->mapping_blocks_ba = ni->backup_table_ba;
1502 ni->backup_table_ba = 0;
1503 main_table_limit = ni->mapping_blocks_top_ba;
1504 goto write_main_table;
1505 }
1506
1507 /* Backup table is successfully written, record its offset */
1508 ni->backup_table_ba = table_start_ba;
1509
1510 /* Adjust mapping_blocks_off */
1511 ni->mapping_blocks_ba = table_end_ba;
1512
1513 nmbm_mark_tables_clean(ni);
1514
1515 /* The normal limit of main table */
1516 main_table_limit = ni->backup_table_ba;
1517
1518 nlog_table_update(ni, false, table_start_ba, table_end_ba);
1519
1520write_main_table:
1521 if (!ni->main_table_ba)
1522 goto rebuild_tables;
1523
1524 if (!ni->backup_table_ba)
1525 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1526 ni->mapping_blocks_ba - 1);
1527 else
1528 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1529 ni->backup_table_ba - 1);
1530
1531 /* Write main info table in its current range */
1532 success = nmbm_write_info_table(ni, ni->main_table_ba,
1533 main_table_limit, &table_start_ba,
1534 &table_end_ba);
1535 if (!success) {
1536 /* If failed to write main table, go rescue procedure */
1537 if (!ni->backup_table_ba)
1538 goto rebuild_tables;
1539
1540 return nmbm_rescue_main_info_table(ni);
1541 }
1542
1543 /* Main info table is successfully written, record its offset */
1544 ni->main_table_ba = table_start_ba;
1545
1546 /* Adjust mapping_blocks_off */
1547 if (!ni->backup_table_ba)
1548 ni->mapping_blocks_ba = table_end_ba;
1549
1550 nmbm_mark_tables_clean(ni);
1551
1552 nlog_table_update(ni, true, table_start_ba, table_end_ba);
1553
1554 return true;
1555
1556rebuild_tables:
1557 return nmbm_rebuild_info_table(ni);
1558}
1559
1560/*
1561 * nmbm_update_info_table - Update info table
1562 * @ni: NMBM instance structure
1563 *
1564 * Update both main and backup info table. Return true if at least one table
1565 * has been successfully written.
1566 * This function will try to update the info table repeatedly until no new
1567 * bad blocks are found during the update.
1568 */
1569static bool nmbm_update_info_table(struct nmbm_instance *ni)
1570{
1571 bool success;
1572
1573 if (ni->protected)
1574 return true;
1575
1576 while (ni->block_state_changed || ni->block_mapping_changed) {
1577 success = nmbm_update_info_table_once(ni, false);
1578 if (!success) {
1579 nlog_err(ni, "Failed to update info table\n");
1580 return false;
1581 }
1582 }
1583
1584 return true;
1585}
1586
1587/*
1588 * nmbm_map_block - Map a bad block to an unused spare block
1589 * @ni: NMBM instance structure
1590 * @lb: logical block address to map
1591 */
1592static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb)
1593{
1594 uint32_t pb;
1595 bool success;
1596
1597 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
1598 nlog_warn(ni, "No spare unmapped blocks.\n");
1599 return false;
1600 }
1601
1602 success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0,
1603 ni->mapping_blocks_ba);
1604 if (!success) {
1605 nlog_warn(ni, "No spare unmapped blocks.\n");
1606 nmbm_update_info_table(ni);
1607 ni->mapping_blocks_top_ba = ni->mapping_blocks_ba;
1608 return false;
1609 }
1610
1611 ni->block_mapping[lb] = pb;
1612 ni->mapping_blocks_top_ba--;
1613 ni->block_mapping_changed++;
1614
1615 nlog_info(ni, "Logic block %u mapped to physical blcok %u\n", lb, pb);
1616 nmbm_mark_block_color_mapped(ni, pb);
1617
1618 return true;
1619}
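/*
 * Illustrative example (numbers assumed): if logical block 100 needs a remap
 * while mapping_blocks_top_ba == 900 and block 900 is good, the call
 * nmbm_map_block(ni, 100) sets block_mapping[100] = 900, lowers
 * mapping_blocks_top_ba to 899 and bumps block_mapping_changed so that the
 * info tables are rewritten on the next update.
 */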
1620
1621/*
1622 * nmbm_create_info_table - Create info table(s)
1623 * @ni: NMBM instance structure
1624 *
1625 * This function assumes that the chip has no existing info table(s)
1626 */
1627static bool nmbm_create_info_table(struct nmbm_instance *ni)
1628{
1629 uint32_t lb;
1630 bool success;
1631
1632 /* Set initial mapping_blocks_top_off */
1633 success = nmbm_block_walk(ni, false, ni->signature_ba,
1634 &ni->mapping_blocks_top_ba, 1,
1635 ni->mgmt_start_ba);
1636 if (!success) {
1637 nlog_err(ni, "No room for spare blocks\n");
1638 return false;
1639 }
1640
1641 /* Generate info table cache */
1642 nmbm_generate_info_table_cache(ni);
1643
1644 /* Write info table */
1645 success = nmbm_rebuild_info_table(ni);
1646 if (!success) {
1647 nlog_err(ni, "Failed to build info tables\n");
1648 return false;
1649 }
1650
1651 /* Remap bad block(s) at end of data area */
1652 for (lb = ni->data_block_count; lb < ni->mgmt_start_ba; lb++) {
1653 success = nmbm_map_block(ni, lb);
1654 if (!success)
1655 break;
1656
1657 ni->data_block_count++;
1658 }
1659
1660 /* If state table and/or mapping table changed, update info table. */
1661 success = nmbm_update_info_table(ni);
1662 if (!success)
1663 return false;
1664
1665 return true;
1666}
1667
1668/*
1669 * nmbm_create_new - Create NMBM on a new chip
1670 * @ni: NMBM instance structure
1671 */
1672static bool nmbm_create_new(struct nmbm_instance *ni)
1673{
1674 bool success;
1675
1676 /* Determine the boundary of management blocks */
1677 ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->lower.max_ratio) / NMBM_MGMT_DIV;
1678
1679 if (ni->lower.max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->lower.max_reserved_blocks)
1680 ni->mgmt_start_ba = ni->block_count - ni->lower.max_reserved_blocks;
1681
1682 nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n",
1683 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
1684 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba, ni->block_count - 1);
1685
1686 /* Fill block state table & mapping table */
1687 nmbm_scan_badblocks(ni);
1688 nmbm_build_mapping_table(ni);
1689
1690 /* Write signature */
1691 ni->signature.header.magic = NMBM_MAGIC_SIGNATURE;
1692 ni->signature.header.version = NMBM_VER;
1693 ni->signature.header.size = sizeof(ni->signature);
1694 ni->signature.nand_size = ni->lower.size;
1695 ni->signature.block_size = ni->lower.erasesize;
1696 ni->signature.page_size = ni->lower.writesize;
1697 ni->signature.spare_size = ni->lower.oobsize;
1698 ni->signature.mgmt_start_pb = ni->mgmt_start_ba;
1699 ni->signature.max_try_count = NMBM_TRY_COUNT;
1700 nmbm_update_checksum(&ni->signature.header);
1701
1702 if (ni->lower.flags & NMBM_F_READ_ONLY) {
1703 nlog_info(ni, "NMBM has been initialized in read-only mode\n");
1704 return true;
1705 }
1706
1707 success = nmbm_write_signature(ni, ni->mgmt_start_ba,
1708 &ni->signature, &ni->signature_ba);
1709 if (!success) {
1710 nlog_err(ni, "Failed to write signature to a proper offset\n");
1711 return false;
1712 }
1713
1714 nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n",
1715 ni->signature_ba, ba2addr(ni, ni->signature_ba));
1716 nmbm_mark_block_color_signature(ni, ni->signature_ba);
1717
1718 /* Write info table(s) */
1719 success = nmbm_create_info_table(ni);
1720 if (success) {
1721 nlog_info(ni, "NMBM has been successfully created\n");
1722 return true;
1723 }
1724
1725 return false;
1726}
1727
1728/*
1729 * nmbm_check_info_table_header - Check if an info table header is valid
1730 * @ni: NMBM instance structure
1731 * @data: pointer to the info table header
1732 */
1733static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data)
1734{
1735 struct nmbm_info_table_header *ifthdr = data;
1736
1737 if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE)
1738 return false;
1739
1740 if (ifthdr->header.size != ni->info_table_size)
1741 return false;
1742
1743 if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size)
1744 return false;
1745
1746 if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size)
1747 return false;
1748
1749 return true;
1750}
1751
1752/*
1753 * nmbm_check_info_table - Check if a whole info table is valid
1754 * @ni: NMBM instance structure
1755 * @start_ba: start block address of this table
1756 * @end_ba: end block address of this table
1757 * @data: pointer to the info table header
1758 * @mapping_blocks_top_ba: return the block address of top remapped block
1759 */
1760static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba,
1761 uint32_t end_ba, void *data,
1762 uint32_t *mapping_blocks_top_ba)
1763{
1764 struct nmbm_info_table_header *ifthdr = data;
1765 int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off);
1766 nmbm_bitmap_t *block_state = (nmbm_bitmap_t *)((uintptr_t)data + ifthdr->state_table_off);
1767 uint32_t minimum_mapping_pb = ni->signature_ba;
1768 uint32_t ba;
1769
1770 for (ba = 0; ba < ni->data_block_count; ba++) {
1771 if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) ||
1772 block_mapping[ba] == ni->signature_ba)
1773 return false;
1774
1775 if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb)
1776 minimum_mapping_pb = block_mapping[ba];
1777 }
1778
1779 for (ba = start_ba; ba < end_ba; ba++) {
1780 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1781 continue;
1782
1783 if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD)
1784 return false;
1785 }
1786
1787 *mapping_blocks_top_ba = minimum_mapping_pb - 1;
1788
1789 return true;
1790}
1791
1792/*
1793 * nmbm_try_load_info_table - Try to load info table from an address
1794 * @ni: NMBM instance structure
1795 * @ba: start block address of the info table
1796 * @eba: return the block address after end of the table
1797 * @write_count: return the write count of this table
1798 * @mapping_blocks_top_ba: return the block address of top remapped block
1799 * @table_loaded: used to record whether ni->info_table has valid data
1800 */
1801static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba,
1802 uint32_t *eba, uint32_t *write_count,
1803 uint32_t *mapping_blocks_top_ba,
1804 bool table_loaded)
1805{
1806 struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache;
1807 uint8_t *off = ni->info_table_cache;
1808 uint32_t limit = ba + size2blk(ni, ni->info_table_size);
1809 uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size;
1810 bool success, checkhdr = true;
1811 int ret;
1812
1813 while (sizeremain && ba < limit) {
1814 WATCHDOG_RESET();
1815
1816 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1817 goto next_block;
1818
1819 if (nmbm_check_bad_phys_block(ni, ba)) {
1820 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1821 goto next_block;
1822 }
1823
1824 chunksize = sizeremain;
1825 if (chunksize > ni->lower.erasesize)
1826 chunksize = ni->lower.erasesize;
1827
1828 /* Assume block with ECC error has no info table data */
1829 ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize);
1830 if (ret < 0)
1831 goto skip_bad_block;
1832 else if (ret > 0)
1833 return false;
1834
1835 if (checkhdr) {
1836 success = nmbm_check_info_table_header(ni, off);
1837 if (!success)
1838 return false;
1839
1840 start_ba = ba;
1841 checkhdr = false;
1842 }
1843
1844 off += chunksize;
1845 sizeremain -= chunksize;
1846
1847 goto next_block;
1848
1849 skip_bad_block:
1850 /* Only mark bad in memory */
1851 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1852
1853 next_block:
1854 ba++;
1855 }
1856
1857 if (sizeremain)
1858 return false;
1859
1860 success = nmbm_check_header(ni->info_table_cache, ni->info_table_size);
1861 if (!success)
1862 return false;
1863
1864 *eba = ba;
1865 *write_count = ifthdr->write_count;
1866
1867 success = nmbm_check_info_table(ni, start_ba, ba, ni->info_table_cache,
1868 mapping_blocks_top_ba);
1869 if (!success)
1870 return false;
1871
1872 if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) {
1873 memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table));
1874 memcpy(ni->block_state,
1875 (uint8_t *)ifthdr + ifthdr->state_table_off,
1876 ni->state_table_size);
1877 memcpy(ni->block_mapping,
1878 (uint8_t *)ifthdr + ifthdr->mapping_table_off,
1879 ni->mapping_table_size);
1880 ni->info_table.write_count = ifthdr->write_count;
1881 }
1882
1883 return true;
1884}
1885
1886/*
1887 * nmbm_search_info_table - Search info table from specific address
1888 * @ni: NMBM instance structure
1889 * @ba: start block address to search
1890 * @limit: highest block address allowed for searching
1891 * @table_start_ba: return the start block address of this table
1892 * @table_end_ba: return the block address after end of this table
1893 * @write_count: return the write count of this table
1894 * @mapping_blocks_top_ba: return the block address of top remapped block
1895 * @table_loaded: whether ni->info_table already contains valid data
1896 */
1897static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba,
1898 uint32_t limit, uint32_t *table_start_ba,
1899 uint32_t *table_end_ba,
1900 uint32_t *write_count,
1901 uint32_t *mapping_blocks_top_ba,
1902 bool table_loaded)
1903{
1904 bool success;
1905
1906 while (ba < limit - size2blk(ni, ni->info_table_size)) {
1907 WATCHDOG_RESET();
1908
1909 success = nmbm_try_load_info_table(ni, ba, table_end_ba,
1910 write_count,
1911 mapping_blocks_top_ba,
1912 table_loaded);
1913 if (success) {
1914 *table_start_ba = ba;
1915 return true;
1916 }
1917
1918 ba++;
1919 }
1920
1921 return false;
1922}
1923
1924/*
1925 * nmbm_load_info_table - Load info table(s) from a chip
1926 * @ni: NMBM instance structure
1927 * @ba: start block address to search info table
1928 * @limit: highest block address allowed for searching
1929 */
1930static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba,
1931 uint32_t limit)
1932{
1933 uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba;
1934 uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba;
1935 uint32_t main_table_write_count, backup_table_write_count;
1936 uint32_t i;
1937 bool success;
1938
1939 /* Set initial value */
1940 ni->main_table_ba = 0;
1941 ni->backup_table_ba = 0;
1942 ni->info_table.write_count = 0;
1943 ni->mapping_blocks_top_ba = ni->signature_ba - 1;
1944 ni->data_block_count = ni->signature.mgmt_start_pb;
1945
1946 /* Find first info table */
1947 success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba,
1948 &main_table_end_ba, &main_table_write_count,
1949 &main_mapping_blocks_top_ba, false);
1950 if (!success) {
1951 nlog_warn(ni, "No valid info table found\n");
1952 return false;
1953 }
1954
1955 table_end_ba = main_table_end_ba;
1956
1957 nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba,
1958 main_table_end_ba);
1959
1960 /* Find second info table */
1961 success = nmbm_search_info_table(ni, main_table_end_ba, limit,
1962 &ni->backup_table_ba, &backup_table_end_ba,
1963 &backup_table_write_count, &backup_mapping_blocks_top_ba, true);
1964 if (!success) {
1965 nlog_warn(ni, "Second info table not found\n");
1966 } else {
1967 table_end_ba = backup_table_end_ba;
1968
1969 nlog_table_found(ni, false, backup_table_write_count,
1970 ni->backup_table_ba, backup_table_end_ba);
1971 }
1972
1973 /* Pick mapping_blocks_top_ba */
1974 if (!ni->backup_table_ba) {
1975 ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
1976 } else {
1977 if (main_table_write_count >= backup_table_write_count)
1978 ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
1979 else
1980 ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba;
1981 }
1982
1983 /* Set final mapping_blocks_ba */
1984 ni->mapping_blocks_ba = table_end_ba;
1985
1986 /* Set final data_block_count */
1987 for (i = ni->signature.mgmt_start_pb; i > 0; i--) {
1988 if (ni->block_mapping[i - 1] >= 0) {
1989 ni->data_block_count = i;
1990 break;
1991 }
1992 }
1993
1994 /* Debug purpose: mark mapped blocks and bad blocks */
1995 for (i = 0; i < ni->data_block_count; i++) {
1996 if (ni->block_mapping[i] > ni->mapping_blocks_top_ba)
1997 nmbm_mark_block_color_mapped(ni, ni->block_mapping[i]);
1998 }
1999
2000 for (i = 0; i < ni->block_count; i++) {
2001 if (nmbm_get_block_state(ni, i) == BLOCK_ST_BAD)
2002 nmbm_mark_block_color_bad(ni, i);
2003 }
2004
2005 /* Regenerate the info table cache from the final selected info table */
2006 nmbm_generate_info_table_cache(ni);
2007
developer49f853a2021-06-23 17:22:02 +08002008 if (ni->lower.flags & NMBM_F_READ_ONLY)
2009 return true;
2010
developer8d16ac22021-05-26 15:32:12 +08002011 /*
2012 * If only one table exists, try to write another table.
2013 * If the two tables have different write counts, try to update the info table
2014 */
2015 if (!ni->backup_table_ba) {
2016 success = nmbm_rescue_single_info_table(ni);
2017 } else if (main_table_write_count != backup_table_write_count) {
2018 /* Mark state & mapping tables changed */
2019 ni->block_state_changed = 1;
2020 ni->block_mapping_changed = 1;
2021
2022 success = nmbm_update_single_info_table(ni,
2023 main_table_write_count < backup_table_write_count);
2024 } else {
2025 success = true;
2026 }
2027
2028 /*
2029 * If there are no spare unmapped blocks, or only one table still
2030 * exists, set the chip to read-only
2031 */
2032 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
2033 nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n");
2034 ni->protected = 1;
2035 } else if (!success) {
2036 nlog_warn(ni, "Only one info table found. Device is now read-only\n");
2037 ni->protected = 1;
2038 }
2039
2040 return true;
2041}
2042
2043/*
2044 * nmbm_load_existing - Load an existing NMBM from a chip, or create one if allowed
2045 * @ni: NMBM instance structure
2046 */
2047static bool nmbm_load_existing(struct nmbm_instance *ni)
2048{
2049 bool success;
2050
2051 /* Calculate the boundary of management blocks */
2052 ni->mgmt_start_ba = ni->signature.mgmt_start_pb;
2053
2054 nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n",
2055 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
2056 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
2057 ni->signature_ba - 1);
2058
2059 /* Look for info table(s) */
2060 success = nmbm_load_info_table(ni, ni->mgmt_start_ba,
2061 ni->signature_ba);
2062 if (success) {
developer49f853a2021-06-23 17:22:02 +08002063 nlog_info(ni, "NMBM has been successfully attached %s\n",
2064 (ni->lower.flags & NMBM_F_READ_ONLY) ? "in read-only mode" : "");
developer8d16ac22021-05-26 15:32:12 +08002065 return true;
2066 }
2067
2068 if (!(ni->lower.flags & NMBM_F_CREATE))
2069 return false;
2070
2071 /* Fill block state table & mapping table */
2072 nmbm_scan_badblocks(ni);
2073 nmbm_build_mapping_table(ni);
2074
developer49f853a2021-06-23 17:22:02 +08002075 if (ni->lower.flags & NMBM_F_READ_ONLY) {
2076 nlog_info(ni, "NMBM has been initialized in read-only mode\n");
2077 return true;
2078 }
2079
developer8d16ac22021-05-26 15:32:12 +08002080 /* Write info table(s) */
2081 success = nmbm_create_info_table(ni);
2082 if (success) {
2083 nlog_info(ni, "NMBM has been successfully created\n");
2084 return true;
2085 }
2086
2087 return false;
2088}
2089
2090/*
2091 * nmbm_find_signature - Find signature in the lower NAND chip
2092 * @ni: NMBM instance structure
2093 * @signature: used for storing the signature data found
2094 * @signature_ba: return the actual block address of the signature block
2095 *
2096 * Find a valid signature from a specific range in the lower NAND chip,
2097 * from bottom (highest address) to top (lowest address)
2098 *
2099 * Return true if found.
2100 */
2101static bool nmbm_find_signature(struct nmbm_instance *ni,
2102 struct nmbm_signature *signature,
2103 uint32_t *signature_ba)
2104{
2105 struct nmbm_signature sig;
2106 uint64_t off, addr;
2107 uint32_t block_count, ba, limit;
2108 bool success;
2109 int ret;
2110
2111 /* Calculate top and bottom block address */
2112 block_count = ni->lower.size >> ni->erasesize_shift;
2113 ba = block_count;
2114 limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->lower.max_ratio);
2115 if (ni->lower.max_reserved_blocks && block_count - limit > ni->lower.max_reserved_blocks)
2116 limit = block_count - ni->lower.max_reserved_blocks;
2117
2118 while (ba >= limit) {
2119 WATCHDOG_RESET();
2120
2121 ba--;
2122 addr = ba2addr(ni, ba);
2123
2124 if (nmbm_check_bad_phys_block(ni, ba))
2125 continue;
2126
2127 /*
2128 * Check every page. As long as at least one page contains a
2129 * valid signature, the block is treated as a valid signature block.
2130 */
2131 for (off = 0; off < ni->lower.erasesize;
2132 off += ni->lower.writesize) {
2133 WATCHDOG_RESET();
2134
2135 ret = nmbn_read_data(ni, addr + off, &sig,
2136 sizeof(sig));
2137 if (ret)
2138 continue;
2139
2140 /* Check for header size and checksum */
2141 success = nmbm_check_header(&sig, sizeof(sig));
2142 if (!success)
2143 continue;
2144
2145 /* Check for header magic */
2146 if (sig.header.magic == NMBM_MAGIC_SIGNATURE) {
2147 /* Found it */
2148 memcpy(signature, &sig, sizeof(sig));
2149 *signature_ba = ba;
2150 return true;
2151 }
2152 }
2153 }
2154
2155 return false;
2156}
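
/*
 * Note on the search window above (a sketch of the arithmetic, not a new
 * rule): only the reserved area at the top of the chip is scanned, i.e.
 * roughly block_count * max_ratio / NMBM_MGMT_DIV blocks counted from the
 * highest address, optionally capped at max_reserved_blocks. For example,
 * assuming NMBM_MGMT_DIV is 16 (the value in nmbm-private.h may differ),
 * 1024 blocks and max_ratio = 1 give limit = 64 * 15 = 960, so blocks
 * 960..1023 are scanned downwards.
 */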
2157
2158/*
2159 * is_power_of_2_u64 - Check whether a 64-bit integer is power of 2
2160 * @n: number to check
2161 */
2162static bool is_power_of_2_u64(uint64_t n)
2163{
2164 return (n != 0 && ((n & (n - 1)) == 0));
2165}
2166
2167/*
2168 * nmbm_check_lower_members - Validate the members of lower NAND device
2169 * @nld: Lower NAND chip structure
2170 */
2171static bool nmbm_check_lower_members(struct nmbm_lower_device *nld)
2172{
2173
2174 if (!nld->size || !is_power_of_2_u64(nld->size)) {
2175 nmbm_log_lower(nld, NMBM_LOG_ERR,
2176 "Chip size %llu is not valid\n", nld->size);
2177 return false;
2178 }
2179
2180 if (!nld->erasesize || !is_power_of_2(nld->erasesize)) {
2181 nmbm_log_lower(nld, NMBM_LOG_ERR,
2182 "Block size %u is not valid\n", nld->erasesize);
2183 return false;
2184 }
2185
2186 if (!nld->writesize || !is_power_of_2(nld->writesize)) {
2187 nmbm_log_lower(nld, NMBM_LOG_ERR,
2188 "Page size %u is not valid\n", nld->writesize);
2189 return false;
2190 }
2191
developer6e6a3d12022-05-09 15:17:32 +08002192 if (!nld->oobsize) {
developer8d16ac22021-05-26 15:32:12 +08002193 nmbm_log_lower(nld, NMBM_LOG_ERR,
2194 "Page spare size %u is not valid\n", nld->oobsize);
2195 return false;
2196 }
2197
developer49f853a2021-06-23 17:22:02 +08002198 if (!nld->read_page) {
2199 nmbm_log_lower(nld, NMBM_LOG_ERR, "read_page() is required\n");
2200 return false;
2201 }
2202
2203 if (!(nld->flags & NMBM_F_READ_ONLY) && (!nld->write_page || !nld->erase_block)) {
developer8d16ac22021-05-26 15:32:12 +08002204 nmbm_log_lower(nld, NMBM_LOG_ERR,
developer49f853a2021-06-23 17:22:02 +08002205 "write_page() and erase_block() are required\n");
developer8d16ac22021-05-26 15:32:12 +08002206 return false;
2207 }
2208
2209 /* Data sanity check */
2210 if (!nld->max_ratio)
2211 nld->max_ratio = 1;
2212
2213 if (nld->max_ratio >= NMBM_MGMT_DIV - 1) {
2214 nmbm_log_lower(nld, NMBM_LOG_ERR,
2215 "max ratio %u is invalid\n", nld->max_ratio);
2216 return false;
2217 }
2218
2219 if (nld->max_reserved_blocks && nld->max_reserved_blocks < NMBM_MGMT_BLOCKS_MIN) {
2220 nmbm_log_lower(nld, NMBM_LOG_ERR,
2221 "max reserved blocks %u is too small\n", nld->max_reserved_blocks);
2222 return false;
2223 }
2224
2225 return true;
2226}
2227
2228/*
2229 * nmbm_calc_structure_size - Calculate the instance structure size
2230 * @nld: NMBM lower device structure
2231 */
2232size_t nmbm_calc_structure_size(struct nmbm_lower_device *nld)
2233{
2234 uint32_t state_table_size, mapping_table_size, info_table_size;
2235 uint32_t block_count;
2236
2237 block_count = nmbm_lldiv(nld->size, nld->erasesize);
2238
2239 /* Calculate info table size */
2240 state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2241 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2242 mapping_table_size = block_count * sizeof(int32_t);
2243
2244 info_table_size = NMBM_ALIGN(sizeof(struct nmbm_info_table_header),
2245 nld->writesize);
2246 info_table_size += NMBM_ALIGN(state_table_size, nld->writesize);
2247 info_table_size += NMBM_ALIGN(mapping_table_size, nld->writesize);
2248
2249 return info_table_size + state_table_size + mapping_table_size +
2250 nld->writesize + nld->oobsize + sizeof(struct nmbm_instance);
2251}
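
/*
 * Example (a minimal sketch, error handling trimmed): the caller is
 * expected to allocate the instance from the size computed above before
 * calling nmbm_attach(). Zero-filling the buffer first is the safe choice.
 *
 *	struct nmbm_instance *ni;
 *
 *	ni = calloc(1, nmbm_calc_structure_size(&nld));
 *	if (!ni)
 *		return -ENOMEM;
 *	ret = nmbm_attach(&nld, ni);
 */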
2252
2253/*
2254 * nmbm_init_structure - Initialize members of instance structure
2255 * @ni: NMBM instance structure
2256 */
2257static void nmbm_init_structure(struct nmbm_instance *ni)
2258{
2259 uint32_t pages_per_block, blocks_per_chip;
2260 uintptr_t ptr;
2261
2262 pages_per_block = ni->lower.erasesize / ni->lower.writesize;
2263 blocks_per_chip = nmbm_lldiv(ni->lower.size, ni->lower.erasesize);
2264
2265 ni->rawpage_size = ni->lower.writesize + ni->lower.oobsize;
2266 ni->rawblock_size = pages_per_block * ni->rawpage_size;
2267 ni->rawchip_size = blocks_per_chip * ni->rawblock_size;
2268
2269 ni->writesize_mask = ni->lower.writesize - 1;
2270 ni->erasesize_mask = ni->lower.erasesize - 1;
2271
2272 ni->writesize_shift = ffs(ni->lower.writesize) - 1;
2273 ni->erasesize_shift = ffs(ni->lower.erasesize) - 1;
2274
2275 /* Calculate the number of blocks of this chip */
2276 ni->block_count = ni->lower.size >> ni->erasesize_shift;
2277
2278 /* Calculate info table size */
2279 ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2280 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2281 ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping);
2282
2283 ni->info_table_size = NMBM_ALIGN(sizeof(ni->info_table),
2284 ni->lower.writesize);
2285 ni->info_table.state_table_off = ni->info_table_size;
2286
2287 ni->info_table_size += NMBM_ALIGN(ni->state_table_size,
2288 ni->lower.writesize);
2289 ni->info_table.mapping_table_off = ni->info_table_size;
2290
2291 ni->info_table_size += NMBM_ALIGN(ni->mapping_table_size,
2292 ni->lower.writesize);
2293
2294 ni->info_table_spare_blocks = nmbm_get_spare_block_count(
2295 size2blk(ni, ni->info_table_size));
2296
2297 /* Assign memory to members */
2298 ptr = (uintptr_t)ni + sizeof(*ni);
2299
2300 ni->info_table_cache = (void *)ptr;
2301 ptr += ni->info_table_size;
2302
2303 ni->block_state = (void *)ptr;
2304 ptr += ni->state_table_size;
2305
2306 ni->block_mapping = (void *)ptr;
2307 ptr += ni->mapping_table_size;
2308
2309 ni->page_cache = (uint8_t *)ptr;
2310
2311 /* Initialize block state table */
2312 ni->block_state_changed = 0;
2313 memset(ni->block_state, 0xff, ni->state_table_size);
2314
2315 /* Initialize block mapping table */
2316 ni->block_mapping_changed = 0;
2317}
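
/*
 * Resulting memory layout of the instance buffer (a sketch; offsets follow
 * the assignments above and the size computed by nmbm_calc_structure_size):
 *
 *	+---------------------------+  ni
 *	| struct nmbm_instance      |
 *	+---------------------------+  ni->info_table_cache
 *	| info table cache          |  info_table_size bytes
 *	+---------------------------+  ni->block_state
 *	| block state bitmap        |  state_table_size bytes
 *	+---------------------------+  ni->block_mapping
 *	| block mapping table       |  mapping_table_size bytes
 *	+---------------------------+  ni->page_cache
 *	| one raw page (data + oob) |
 *	+---------------------------+
 */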
2318
2319/*
2320 * nmbm_attach - Attach to a lower device
2321 * @nld: NMBM lower device structure
2322 * @ni: NMBM instance structure
2323 */
2324int nmbm_attach(struct nmbm_lower_device *nld, struct nmbm_instance *ni)
2325{
2326 bool success;
2327
2328 if (!nld || !ni)
2329 return -EINVAL;
2330
2331 /* Set default log level */
2332 ni->log_display_level = NMBM_DEFAULT_LOG_LEVEL;
2333
2334 /* Check lower members */
2335 success = nmbm_check_lower_members(nld);
2336 if (!success)
2337 return -EINVAL;
2338
2339 /* Initialize NMBM instance */
2340 memcpy(&ni->lower, nld, sizeof(struct nmbm_lower_device));
2341 nmbm_init_structure(ni);
2342
2343 success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba);
2344 if (!success) {
2345 if (!(nld->flags & NMBM_F_CREATE)) {
2346 nlog_err(ni, "Signature not found\n");
2347 return -ENODEV;
2348 }
2349
2350 success = nmbm_create_new(ni);
2351 if (!success)
2352 return -ENODEV;
2353
2354 return 0;
2355 }
2356
2357 nlog_info(ni, "Signature found at block %u [0x%08llx]\n",
2358 ni->signature_ba, ba2addr(ni, ni->signature_ba));
2359 nmbm_mark_block_color_signature(ni, ni->signature_ba);
2360
2361 if (ni->signature.header.version != NMBM_VER) {
2362 nlog_err(ni, "NMBM version %u.%u is not supported\n",
2363 NMBM_VERSION_MAJOR_GET(ni->signature.header.version),
2364 NMBM_VERSION_MINOR_GET(ni->signature.header.version));
2365 return -EINVAL;
2366 }
2367
2368 if (ni->signature.nand_size != nld->size ||
2369 ni->signature.block_size != nld->erasesize ||
2370 ni->signature.page_size != nld->writesize ||
2371 ni->signature.spare_size != nld->oobsize) {
2372 nlog_err(ni, "NMBM configuration mismatch\n");
2373 return -EINVAL;
2374 }
2375
2376 success = nmbm_load_existing(ni);
2377 if (!success)
2378 return -ENODEV;
2379
2380 return 0;
2381}
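
/*
 * Example attach sequence (an illustrative sketch, not the only valid one;
 * my_read_page(), my_write_page(), my_erase_block() and my_logprint() are
 * hypothetical callbacks supplied by the platform glue, and other optional
 * members are omitted):
 *
 *	struct nmbm_lower_device nld = { 0 };
 *	struct nmbm_instance *ni;
 *
 *	nld.size = chip_size;
 *	nld.erasesize = block_size;
 *	nld.writesize = page_size;
 *	nld.oobsize = spare_size;
 *	nld.max_ratio = 1;
 *	nld.flags = NMBM_F_CREATE;
 *	nld.read_page = my_read_page;
 *	nld.write_page = my_write_page;
 *	nld.erase_block = my_erase_block;
 *	nld.logprint = my_logprint;
 *
 *	ni = calloc(1, nmbm_calc_structure_size(&nld));
 *	if (!ni || nmbm_attach(&nld, ni))
 *		return -ENODEV;
 */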
2382
2383/*
2384 * nmbm_detach - Detach from a lower device, and save all tables
2385 * @ni: NMBM instance structure
2386 */
2387int nmbm_detach(struct nmbm_instance *ni)
2388{
2389 if (!ni)
2390 return -EINVAL;
2391
developer49f853a2021-06-23 17:22:02 +08002392 if (!(ni->lower.flags & NMBM_F_READ_ONLY))
2393 nmbm_update_info_table(ni);
developer8d16ac22021-05-26 15:32:12 +08002394
2395 nmbm_mark_block_color_normal(ni, 0, ni->block_count - 1);
2396
2397 return 0;
2398}
2399
2400/*
2401 * nmbm_erase_logic_block - Erase a logic block
2402 * @ni: NMBM instance structure
2403 * @block_addr: logic block address
2404 *
2405 * The logic block is mapped to a physical block before erasing.
2406 * A bad block found during erasing will be remapped to a good block if there
2407 * is still at least one good spare block available.
2408 */
2409static int nmbm_erase_logic_block(struct nmbm_instance *ni, uint32_t block_addr)
2410{
2411 uint32_t pb;
2412 bool success;
2413
2414retry:
2415 /* Map logic block to physical block */
2416 pb = ni->block_mapping[block_addr];
2417
2418 /* Whether the logic block is good (has valid mapping) */
2419 if ((int32_t)pb < 0) {
2420 nlog_debug(ni, "Logic block %u is a bad block\n", block_addr);
2421 return -EIO;
2422 }
2423
2424 /* Remap logic block if current physical block is a bad block */
2425 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD ||
2426 nmbm_get_block_state(ni, pb) == BLOCK_ST_NEED_REMAP)
2427 goto remap_logic_block;
developer4f9017d2021-06-16 17:18:47 +08002428
2429 /* Insurance to detect unexpected bad block marked by user */
2430 if (nmbm_check_bad_phys_block(ni, pb)) {
2431 nlog_warn(ni, "Found unexpected bad block possibly marked by user\n");
2432 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2433 goto remap_logic_block;
2434 }
developer8d16ac22021-05-26 15:32:12 +08002435
developer28a313b2021-06-16 17:23:34 +08002436 success = nmbm_erase_block_and_check(ni, pb);
developer8d16ac22021-05-26 15:32:12 +08002437 if (success)
2438 return 0;
2439
2440 /* Mark bad block */
2441 nmbm_mark_phys_bad_block(ni, pb);
2442 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2443
2444remap_logic_block:
2445 /* Try to assign a new block */
2446 success = nmbm_map_block(ni, block_addr);
2447 if (!success) {
2448 /* Mark logic block unusable, and update info table */
2449 ni->block_mapping[block_addr] = -1;
2450 if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2451 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2452 nmbm_update_info_table(ni);
2453 return -EIO;
2454 }
2455
2456 /* Update info table before erasing */
2457 if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2458 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2459 nmbm_update_info_table(ni);
2460
2461 goto retry;
2462}
2463
2464/*
2465 * nmbm_erase_block_range - Erase logic blocks
2466 * @ni: NMBM instance structure
2467 * @addr: logic linear address
2468 * @size: erase range
2469 * @failed_addr: return failed block address if error occurs
2470 */
2471int nmbm_erase_block_range(struct nmbm_instance *ni, uint64_t addr,
2472 uint64_t size, uint64_t *failed_addr)
2473{
2474 uint32_t start_ba, end_ba;
2475 int ret;
2476
2477 if (!ni)
2478 return -EINVAL;
2479
2480 /* Sanity check */
developer49f853a2021-06-23 17:22:02 +08002481 if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
developer8d16ac22021-05-26 15:32:12 +08002482 nlog_debug(ni, "Device is forced read-only\n");
2483 return -EROFS;
2484 }
2485
2486 if (addr >= ba2addr(ni, ni->data_block_count)) {
2487 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2488 return -EINVAL;
2489 }
2490
2491 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2492 nlog_err(ni, "Erase range 0xllxu is too large\n", size);
2493 return -EINVAL;
2494 }
2495
2496 if (!size) {
2497 nlog_warn(ni, "No blocks to be erased\n");
2498 return 0;
2499 }
2500
2501 start_ba = addr2ba(ni, addr);
2502 end_ba = addr2ba(ni, addr + size - 1);
2503
2504 while (start_ba <= end_ba) {
2505 WATCHDOG_RESET();
2506
2507 ret = nmbm_erase_logic_block(ni, start_ba);
2508 if (ret) {
2509 if (failed_addr)
2510 *failed_addr = ba2addr(ni, start_ba);
2511 return ret;
2512 }
2513
2514 start_ba++;
2515 }
2516
2517 return 0;
2518}
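
/*
 * Example (sketch): erase the first logic block using only the public API
 * and report the failing address on error.
 *
 *	struct nmbm_lower_device nld;
 *	uint64_t failed = 0;
 *	int ret;
 *
 *	nmbm_get_lower_device(ni, &nld);
 *	ret = nmbm_erase_block_range(ni, 0, nld.erasesize, &failed);
 *	if (ret)
 *		printf("erase failed at 0x%llx\n", (unsigned long long)failed);
 */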
2519
2520/*
2521 * nmbm_read_logic_page - Read page based on logic address
2522 * @ni: NMBM instance structure
2523 * @addr: logic linear address
2524 * @data: buffer to store main data. optional.
2525 * @oob: buffer to store oob data. optional.
2526 * @mode: read mode
developerd1457c92021-06-16 17:23:18 +08002527 *
2528 * Return 0 for success, positive value for corrected bitflip count,
2529 * -EBADMSG for ecc error, other negative values for other errors
developer8d16ac22021-05-26 15:32:12 +08002530 */
2531static int nmbm_read_logic_page(struct nmbm_instance *ni, uint64_t addr,
2532 void *data, void *oob, enum nmbm_oob_mode mode)
2533{
2534 uint32_t lb, pb, offset;
2535 uint64_t paddr;
developer8d16ac22021-05-26 15:32:12 +08002536
2537 /* Extract block address and in-block offset */
2538 lb = addr2ba(ni, addr);
2539 offset = addr & ni->erasesize_mask;
2540
2541 /* Map logic block to physical block */
2542 pb = ni->block_mapping[lb];
2543
2544 /* Whether the logic block is good (has valid mapping) */
2545 if ((int32_t)pb < 0) {
2546 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2547 return -EIO;
2548 }
2549
2550 /* Fail if physical block is marked bad */
2551 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2552 return -EIO;
2553
2554 /* Assemble new address */
2555 paddr = ba2addr(ni, pb) + offset;
2556
developer55097772021-06-16 17:23:50 +08002557 return nmbm_read_phys_page(ni, paddr, data, oob, mode);
developer8d16ac22021-05-26 15:32:12 +08002558}
2559
2560/*
2561 * nmbm_read_single_page - Read one page based on logic address
2562 * @ni: NMBM instance structure
2563 * @addr: logic linear address
2564 * @data: buffer to store main data. optional.
2565 * @oob: buffer to store oob data. optional.
2566 * @mode: read mode
developerd1457c92021-06-16 17:23:18 +08002567 *
2568 * Return 0 for success, positive value for corrected bitflip count,
2569 * -EBADMSG for ecc error, other negative values for other errors
developer8d16ac22021-05-26 15:32:12 +08002570 */
2571int nmbm_read_single_page(struct nmbm_instance *ni, uint64_t addr, void *data,
2572 void *oob, enum nmbm_oob_mode mode)
2573{
2574 if (!ni)
2575 return -EINVAL;
2576
2577 /* Sanity check */
2578 if (ni->protected) {
2579 nlog_debug(ni, "Device is forced read-only\n");
2580 return -EROFS;
2581 }
2582
2583 if (addr >= ba2addr(ni, ni->data_block_count)) {
2584 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2585 return -EINVAL;
2586 }
2587
2588 return nmbm_read_logic_page(ni, addr, data, oob, mode);
2589}
2590
2591/*
2592 * nmbm_read_range - Read data without oob
2593 * @ni: NMBM instance structure
2594 * @addr: logic linear address
2595 * @size: data size to read
2596 * @data: buffer to store main data to be read
2597 * @mode: read mode
2598 * @retlen: return actual data size read
developerd1457c92021-06-16 17:23:18 +08002599 *
2600 * Return 0 for success, positive value for corrected bitflip count,
2601 * -EBADMSG for ecc error, other negative values for other errors
developer8d16ac22021-05-26 15:32:12 +08002602 */
2603int nmbm_read_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2604 void *data, enum nmbm_oob_mode mode, size_t *retlen)
2605{
2606 uint64_t off = addr;
2607 uint8_t *ptr = data;
2608 size_t sizeremain = size, chunksize, leading;
developerd1457c92021-06-16 17:23:18 +08002609 bool has_ecc_err = false;
2610 int ret, max_bitflips = 0;
developer8d16ac22021-05-26 15:32:12 +08002611
2612 if (!ni)
2613 return -EINVAL;
2614
2615 /* Sanity check */
2616 if (ni->protected) {
2617 nlog_debug(ni, "Device is forced read-only\n");
2618 return -EROFS;
2619 }
2620
2621 if (addr >= ba2addr(ni, ni->data_block_count)) {
2622 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2623 return -EINVAL;
2624 }
2625
2626 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2627 nlog_err(ni, "Read range 0x%llx is too large\n", size);
2628 return -EINVAL;
2629 }
2630
2631 if (!size) {
2632 nlog_warn(ni, "No data to be read\n");
2633 return 0;
2634 }
2635
2636 while (sizeremain) {
2637 WATCHDOG_RESET();
2638
2639 leading = off & ni->writesize_mask;
2640 chunksize = ni->lower.writesize - leading;
2641 if (chunksize > sizeremain)
2642 chunksize = sizeremain;
2643
2644 if (chunksize == ni->lower.writesize) {
2645 ret = nmbm_read_logic_page(ni, off - leading, ptr,
2646 NULL, mode);
developerd1457c92021-06-16 17:23:18 +08002647 if (ret < 0 && ret != -EBADMSG)
developer8d16ac22021-05-26 15:32:12 +08002648 break;
2649 } else {
2650 ret = nmbm_read_logic_page(ni, off - leading,
2651 ni->page_cache, NULL,
2652 mode);
developerd1457c92021-06-16 17:23:18 +08002653 if (ret < 0 && ret != -EBADMSG)
developer8d16ac22021-05-26 15:32:12 +08002654 break;
2655
2656 memcpy(ptr, ni->page_cache + leading, chunksize);
2657 }
2658
developerd1457c92021-06-16 17:23:18 +08002659 if (ret == -EBADMSG)
2660 has_ecc_err = true;
2661
2662 if (ret > max_bitflips)
2663 max_bitflips = ret;
2664
developer8d16ac22021-05-26 15:32:12 +08002665 off += chunksize;
2666 ptr += chunksize;
2667 sizeremain -= chunksize;
2668 }
2669
2670 if (retlen)
2671 *retlen = size - sizeremain;
2672
developerd1457c92021-06-16 17:23:18 +08002673 if (ret < 0 && ret != -EBADMSG)
2674 return ret;
2675
2676 if (has_ecc_err)
2677 return -EBADMSG;
2678
2679 return max_bitflips;
developer8d16ac22021-05-26 15:32:12 +08002680}
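
/*
 * Example (sketch) of consuming the return convention above; mode is
 * whichever enum nmbm_oob_mode value the caller normally uses.
 *
 *	size_t retlen = 0;
 *	int ret;
 *
 *	ret = nmbm_read_range(ni, addr, len, buf, mode, &retlen);
 *	if (ret == -EBADMSG) {
 *		// data copied out, but at least one page had an ECC error
 *	} else if (ret < 0) {
 *		// fatal error; retlen bytes were read before it occurred
 *	} else if (ret > 0) {
 *		// success; ret is the worst corrected bitflip count per page
 *	}
 */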
2681
2682/*
2683 * nmbm_write_logic_page - Write page based on logic address
2684 * @ni: NMBM instance structure
2685 * @addr: logic linear address
2686 * @data: buffer contains main data. optional.
2687 * @oob: buffer contains oob data. optional.
2688 * @mode: write mode
2689 */
2690static int nmbm_write_logic_page(struct nmbm_instance *ni, uint64_t addr,
2691 const void *data, const void *oob,
2692 enum nmbm_oob_mode mode)
2693{
2694 uint32_t lb, pb, offset;
2695 uint64_t paddr;
2696 bool success;
2697
2698 /* Extract block address and in-block offset */
2699 lb = addr2ba(ni, addr);
2700 offset = addr & ni->erasesize_mask;
2701
2702 /* Map logic block to physical block */
2703 pb = ni->block_mapping[lb];
2704
2705 /* Whether the logic block is good (has valid mapping) */
2706 if ((int32_t)pb < 0) {
2707 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2708 return -EIO;
2709 }
2710
2711 /* Fail if physical block is marked bad */
2712 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2713 return -EIO;
2714
2715 /* Assemble new address */
2716 paddr = ba2addr(ni, pb) + offset;
2717
2718 success = nmbm_write_phys_page(ni, paddr, data, oob, mode);
2719 if (success)
2720 return 0;
2721
2722 /*
2723 * Do not remap bad block here. Just mark this block in state table.
2724 * Remap this block on erasing.
2725 */
2726 nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2727 nmbm_update_info_table(ni);
2728
2729 return -EIO;
2730}
2731
2732/*
2733 * nmbm_write_single_page - Write one page based on logic address
2734 * @ni: NMBM instance structure
2735 * @addr: logic linear address
2736 * @data: buffer contains main data. optional.
2737 * @oob: buffer contains oob data. optional.
2738 * @mode: write mode
2739 */
2740int nmbm_write_single_page(struct nmbm_instance *ni, uint64_t addr,
2741 const void *data, const void *oob,
2742 enum nmbm_oob_mode mode)
2743{
2744 if (!ni)
2745 return -EINVAL;
2746
2747 /* Sanity check */
developer49f853a2021-06-23 17:22:02 +08002748 if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
developer8d16ac22021-05-26 15:32:12 +08002749 nlog_debug(ni, "Device is forced read-only\n");
2750 return -EROFS;
2751 }
2752
2753 if (addr >= ba2addr(ni, ni->data_block_count)) {
2754 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2755 return -EINVAL;
2756 }
2757
2758 return nmbm_write_logic_page(ni, addr, data, oob, mode);
2759}
2760
2761/*
2762 * nmbm_write_range - Write data without oob
2763 * @ni: NMBM instance structure
2764 * @addr: logic linear address
2765 * @size: data size to write
2766 * @data: buffer contains data to be written
2767 * @mode: write mode
2768 * @retlen: return actual data size written
2769 */
2770int nmbm_write_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2771 const void *data, enum nmbm_oob_mode mode,
2772 size_t *retlen)
2773{
2774 uint64_t off = addr;
2775 const uint8_t *ptr = data;
2776 size_t sizeremain = size, chunksize, leading;
2777 int ret;
2778
2779 if (!ni)
2780 return -EINVAL;
2781
2782 /* Sanity check */
developer49f853a2021-06-23 17:22:02 +08002783 if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
developer8d16ac22021-05-26 15:32:12 +08002784 nlog_debug(ni, "Device is forced read-only\n");
2785 return -EROFS;
2786 }
2787
2788 if (addr >= ba2addr(ni, ni->data_block_count)) {
2789 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2790 return -EINVAL;
2791 }
2792
2793 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2794 nlog_err(ni, "Write size 0x%zx is too large\n", size);
2795 return -EINVAL;
2796 }
2797
2798 if (!size) {
2799 nlog_warn(ni, "No data to be written\n");
2800 return 0;
2801 }
2802
2803 while (sizeremain) {
2804 WATCHDOG_RESET();
2805
2806 leading = off & ni->writesize_mask;
2807 chunksize = ni->lower.writesize - leading;
2808 if (chunksize > sizeremain)
2809 chunksize = sizeremain;
2810
2811 if (chunksize == ni->lower.writesize) {
2812 ret = nmbm_write_logic_page(ni, off - leading, ptr,
2813 NULL, mode);
2814 if (ret)
2815 break;
2816 } else {
2817 memset(ni->page_cache, 0xff, leading);
2818 memcpy(ni->page_cache + leading, ptr, chunksize);
2819
2820 ret = nmbm_write_logic_page(ni, off - leading,
2821 ni->page_cache, NULL,
2822 mode);
2823 if (ret)
2824 break;
2825 }
2826
2827 off += chunksize;
2828 ptr += chunksize;
2829 sizeremain -= chunksize;
2830 }
2831
2832 if (retlen)
2833 *retlen = size - sizeremain;
2834
2835 return ret;
2836}
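
/*
 * Note (descriptive, not a new rule): a write that does not start on a page
 * boundary is staged in ni->page_cache, with the leading bytes in front of
 * the data filled with 0xff before the whole page is programmed. On failure,
 * *retlen reports the bytes accepted before the failing page.
 */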
2837
2838/*
2839 * nmbm_check_bad_block - Check whether a logic block is usable
2840 * @ni: NMBM instance structure
2841 * @addr: logic linear address
2842 */
2843int nmbm_check_bad_block(struct nmbm_instance *ni, uint64_t addr)
2844{
2845 uint32_t lb, pb;
2846
2847 if (!ni)
2848 return -EINVAL;
2849
2850 if (addr >= ba2addr(ni, ni->data_block_count)) {
2851 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2852 return -EINVAL;
2853 }
2854
2855 lb = addr2ba(ni, addr);
2856
2857 /* Map logic block to physical block */
2858 pb = ni->block_mapping[lb];
2859
2860 if ((int32_t)pb < 0)
2861 return 1;
2862
2863 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2864 return 1;
2865
2866 return 0;
2867}
2868
2869/*
2870 * nmbm_mark_bad_block - Mark a logic block unusable
2871 * @ni: NMBM instance structure
2872 * @addr: logic linear address
2873 */
2874int nmbm_mark_bad_block(struct nmbm_instance *ni, uint64_t addr)
2875{
2876 uint32_t lb, pb;
2877
2878 if (!ni)
2879 return -EINVAL;
2880
developer49f853a2021-06-23 17:22:02 +08002881 /* Sanity check */
2882 if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
2883 nlog_debug(ni, "Device is forced read-only\n");
2884 return -EROFS;
2885 }
2886
developer8d16ac22021-05-26 15:32:12 +08002887 if (addr >= ba2addr(ni, ni->data_block_count)) {
2888 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2889 return -EINVAL;
2890 }
2891
2892 lb = addr2ba(ni, addr);
2893
2894 /* Map logic block to physical block */
2895 pb = ni->block_mapping[lb];
2896
2897 if ((int32_t)pb < 0)
2898 return 0;
2899
2900 ni->block_mapping[lb] = -1;
2901 nmbm_mark_phys_bad_block(ni, pb);
2902 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2903 nmbm_update_info_table(ni);
2904
2905 return 0;
2906}
2907
2908/*
2909 * nmbm_get_avail_size - Get available user data size
2910 * @ni: NMBM instance structure
2911 */
2912uint64_t nmbm_get_avail_size(struct nmbm_instance *ni)
2913{
2914 if (!ni)
2915 return 0;
2916
2917 return (uint64_t)ni->data_block_count << ni->erasesize_shift;
2918}
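
/*
 * Example (sketch): an upper layer, e.g. an MTD or block device wrapper,
 * would typically advertise exactly this value as its device size:
 *
 *	mtd_size = nmbm_get_avail_size(ni);
 *
 * Blocks above this boundary (info tables, spare blocks and the signature)
 * stay hidden from the upper layer.
 */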
2919
2920/*
2921 * nmbm_get_lower_device - Get lower device structure
2922 * @ni: NMBM instance structure
2923 * @nld: pointer to hold the data of lower device structure
2924 */
2925int nmbm_get_lower_device(struct nmbm_instance *ni, struct nmbm_lower_device *nld)
2926{
2927 if (!ni)
2928 return -EINVAL;
2929
2930 if (nld)
2931 memcpy(nld, &ni->lower, sizeof(*nld));
2932
2933 return 0;
2934}
2935
2936#include "nmbm-debug.inl"