1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2021 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 */
7
8#include "nmbm-private.h"
9
10#include "nmbm-debug.h"
11
12#define NMBM_VER_MAJOR 1
13#define NMBM_VER_MINOR 0
14#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
15 NMBM_VER_MINOR)
16
17#define NMBM_ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))
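/*
 * NMBM_ALIGN rounds @v up to the next multiple of @a and assumes @a is a
 * power of two. For example, NMBM_ALIGN(5000, 4096) evaluates to
 * (5000 + 4095) & ~4095 == 8192, while an already-aligned value such as
 * NMBM_ALIGN(4096, 4096) stays 4096.
 */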
18
19/*****************************************************************************/
20/* Logging related functions */
21/*****************************************************************************/
22
23/*
24 * nmbm_log_lower - Print log using OS specific routine
25 * @nld: NMBM lower device structure
26 * @level: log level
27 * @fmt: format string
28 */
29static void nmbm_log_lower(struct nmbm_lower_device *nld,
30 enum nmbm_log_category level, const char *fmt, ...)
31{
32 va_list ap;
33
34 if (!nld->logprint)
35 return;
36
37 va_start(ap, fmt);
38 nld->logprint(nld->arg, level, fmt, ap);
39 va_end(ap);
40}
41
42/*
43 * nmbm_log - Print log using OS specific routine
44 * @ni: NMBM instance structure
45 * @level: log level
46 * @fmt: format string
47 */
48static void nmbm_log(struct nmbm_instance *ni, enum nmbm_log_category level,
49 const char *fmt, ...)
50{
51 va_list ap;
52
53 if (!ni)
54 return;
55
56 if (!ni->lower.logprint || level < ni->log_display_level)
57 return;
58
59 va_start(ap, fmt);
60 ni->lower.logprint(ni->lower.arg, level, fmt, ap);
61 va_end(ap);
62}
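/*
 * The logprint hook is handed the format string together with a va_list, so
 * an OS glue layer only needs a vprintf-style routine. A minimal sketch of
 * such a callback (the name my_logprint is only illustrative, not part of
 * this file) might be:
 *
 *	static void my_logprint(void *arg, enum nmbm_log_category level,
 *				const char *fmt, va_list ap)
 *	{
 *		vprintf(fmt, ap);
 *	}
 *
 * installed through the logprint/arg members of struct nmbm_lower_device.
 */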
63
64/*
65 * nmbm_set_log_level - Set log display level
66 * @ni: NMBM instance structure
67 * @level: log display level
68 */
69enum nmbm_log_category nmbm_set_log_level(struct nmbm_instance *ni,
70 enum nmbm_log_category level)
71{
72 enum nmbm_log_category old;
73
74 if (!ni)
75 return __NMBM_LOG_MAX;
76
77 old = ni->log_display_level;
78 ni->log_display_level = level;
79 return old;
80}
81
82/*
83 * nlog_table_creation - Print log of table creation event
84 * @ni: NMBM instance structure
85 * @main_table: whether the table is main info table
86 * @start_ba: start block address of the table
87 * @end_ba: block address after the end of the table
88 */
89static void nlog_table_creation(struct nmbm_instance *ni, bool main_table,
90 uint32_t start_ba, uint32_t end_ba)
91{
92 if (start_ba == end_ba - 1)
93 nlog_info(ni, "%s info table has been written to block %u\n",
94 main_table ? "Main" : "Backup", start_ba);
95 else
96 nlog_info(ni, "%s info table has been written to block %u-%u\n",
97 main_table ? "Main" : "Backup", start_ba, end_ba - 1);
98
99 nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
100}
101
102/*
103 * nlog_table_update - Print log of table update event
104 * @ni: NMBM instance structure
105 * @main_table: whether the table is main info table
106 * @start_ba: start block address of the table
107 * @end_ba: block address after the end of the table
108 */
109static void nlog_table_update(struct nmbm_instance *ni, bool main_table,
110 uint32_t start_ba, uint32_t end_ba)
111{
112 if (start_ba == end_ba - 1)
113 nlog_debug(ni, "%s info table has been updated in block %u\n",
114 main_table ? "Main" : "Backup", start_ba);
115 else
116 nlog_debug(ni, "%s info table has been updated in block %u-%u\n",
117 main_table ? "Main" : "Backup", start_ba, end_ba - 1);
118
119 nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
120}
121
122/*
123 * nlog_table_found - Print log of table found event
124 * @ni: NMBM instance structure
125 * @first_table: whether the table is first found info table
126 * @write_count: write count of the info table
127 * @start_ba: start block address of the table
128 * @end_ba: block address after the end of the table
129 */
130static void nlog_table_found(struct nmbm_instance *ni, bool first_table,
131 uint32_t write_count, uint32_t start_ba,
132 uint32_t end_ba)
133{
134 if (start_ba == end_ba - 1)
135 nlog_info(ni, "%s info table with writecount %u found in block %u\n",
136 first_table ? "First" : "Second", write_count,
137 start_ba);
138 else
139 nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n",
140 first_table ? "First" : "Second", write_count,
141 start_ba, end_ba - 1);
142
143 nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
144}
145
146/*****************************************************************************/
147/* Address conversion functions */
148/*****************************************************************************/
149
150/*
151 * addr2ba - Convert a linear address to block address
152 * @ni: NMBM instance structure
153 * @addr: Linear address
154 */
155static uint32_t addr2ba(struct nmbm_instance *ni, uint64_t addr)
156{
157 return addr >> ni->erasesize_shift;
158}
159
160/*
161 * ba2addr - Convert a block address to linear address
162 * @ni: NMBM instance structure
163 * @ba: Block address
164 */
165static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba)
166{
167 return (uint64_t)ba << ni->erasesize_shift;
168}
169/*
170 * size2blk - Get minimum required blocks for storing specific size of data
171 * @ni: NMBM instance structure
172 * @size: size for storing
173 */
174static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size)
175{
176 return (size + ni->lower.erasesize - 1) >> ni->erasesize_shift;
177}
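/*
 * All three helpers above rely on the erase size being a power of two so
 * that erasesize_shift can replace division. As a worked example, with
 * 128KiB blocks (erasesize_shift == 17): addr2ba(ni, 0x60000) == 3,
 * ba2addr(ni, 3) == 0x60000, and size2blk(ni, 0x20001) == 2 because one
 * extra byte beyond a full block still needs a whole second block.
 */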
178
179/*****************************************************************************/
180/* High level NAND chip APIs */
181/*****************************************************************************/
182
183/*
184 * nmbm_reset_chip - Reset NAND device
185 * @nld: Lower NAND chip structure
186 */
187static void nmbm_reset_chip(struct nmbm_instance *ni)
188{
189 if (ni->lower.reset_chip)
190 ni->lower.reset_chip(ni->lower.arg);
191}
192
193/*
194 * nmbm_read_phys_page - Read page with retry
195 * @ni: NMBM instance structure
196 * @addr: linear address where the data will be read from
197 * @data: the main data to be read
198 * @oob: the oob data to be read
199 * @mode: mode for processing oob data
200 *
201 * Read a page for at most NMBM_TRY_COUNT times.
202 *
203 * Return 0 for success, positive value for corrected bitflip count,
204 * -EBADMSG for ecc error, other negative values for other errors
205 */
206static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr,
207 void *data, void *oob, enum nmbm_oob_mode mode)
208{
209 int tries, ret;
210
211 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
212 ret = ni->lower.read_page(ni->lower.arg, addr, data, oob, mode);
213 if (ret >= 0)
214 return ret;
215
216 nmbm_reset_chip(ni);
217 }
218
219 if (ret != -EBADMSG)
220 nlog_err(ni, "Page read failed at address 0x%08llx\n", addr);
221
222 return ret;
223}
224
225/*
226 * nmbm_write_phys_page - Write page with retry
227 * @ni: NMBM instance structure
228 * @addr: linear address where the data will be written to
229 * @data: the main data to be written
230 * @oob: the oob data to be written
231 * @mode: mode for processing oob data
232 *
233 * Write a page for at most NMBM_TRY_COUNT times.
234 */
235static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
236 const void *data, const void *oob,
237 enum nmbm_oob_mode mode)
238{
239 int tries, ret;
240
241 if (ni->lower.flags & NMBM_F_READ_ONLY) {
242 nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
243 return false;
244 }
245
246 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
247 ret = ni->lower.write_page(ni->lower.arg, addr, data, oob, mode);
248 if (!ret)
249 return true;
250
251 nmbm_reset_chip(ni);
252 }
253
254 nlog_err(ni, "Page write failed at address 0x%08llx\n", addr);
255
256 return false;
257}
258
259/*
260 * nmbm_panic_write_phys_page - Panic write page with retry
261 * @ni: NMBM instance structure
262 * @addr: linear address where the data will be written to
263 * @data: the main data to be written
264 *
265 * Write a page for at most NMBM_TRY_COUNT times.
266 */
267static bool nmbm_panic_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
268 const void *data)
269{
270 int tries, ret;
271
272 if (ni->lower.flags & NMBM_F_READ_ONLY) {
273 nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
274 return false;
275 }
276
277 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
278 ret = ni->lower.panic_write_page(ni->lower.arg, addr, data);
279 if (!ret)
280 return true;
281
282 nmbm_reset_chip(ni);
283 }
284
285 nlog_err(ni, "Panic page write failed at address 0x%08llx\n", addr);
286
287 return false;
288}
289
290/*
291 * nmbm_erase_phys_block - Erase a block with retry
292 * @ni: NMBM instance structure
293 * @addr: Linear address
294 *
295 * Erase a block for at most NMBM_TRY_COUNT times.
296 */
297static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr)
298{
299 int tries, ret;
300
301 if (ni->lower.flags & NMBM_F_READ_ONLY) {
302 nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
303 return false;
304 }
305
306 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
307 ret = ni->lower.erase_block(ni->lower.arg, addr);
308 if (!ret)
309 return true;
310
311 nmbm_reset_chip(ni);
312 }
313
314 nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr);
315
316 return false;
317}
318
319/*
320 * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
321 * @ni: NMBM instance structure
322 * @ba: block address
323 */
324static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba)
325{
326 uint64_t addr = ba2addr(ni, ba);
327 int ret;
328
329 if (ni->lower.is_bad_block)
330 return ni->lower.is_bad_block(ni->lower.arg, addr);
331
332 /* Treat ECC error as read success */
333 ret = nmbm_read_phys_page(ni, addr, NULL,
334 ni->page_cache + ni->lower.writesize,
335 NMBM_MODE_RAW);
336 if (ret < 0 && ret != -EBADMSG)
337 return true;
338
339 return ni->page_cache[ni->lower.writesize] != 0xff;
340}
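/*
 * Note: when the lower driver supplies no is_bad_block() callback, the check
 * above falls back to the usual factory marker convention: a raw read of the
 * first page's OOB area, where any value other than 0xff in the first spare
 * byte marks the block as bad.
 */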
341
342/*
343 * nmbm_mark_phys_bad_block - Mark a block bad
344 * @ni: NMBM instance structure
345 * @addr: Linear address
346 */
347static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba)
348{
349 uint64_t addr = ba2addr(ni, ba);
350 enum nmbm_log_category level;
351 uint32_t off;
352
353 if (ni->lower.flags & NMBM_F_READ_ONLY) {
354 nlog_err(ni, "%s called with NMBM_F_READ_ONLY set\n", __func__);
355 return false;
356 }
357
358 nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr);
359
360 if (ni->lower.mark_bad_block)
361 return ni->lower.mark_bad_block(ni->lower.arg, addr);
362
363 /* Whole page set to 0x00 */
364 memset(ni->page_cache, 0, ni->rawpage_size);
365
366 /* Write to all pages within this block, disable all errors */
367 level = nmbm_set_log_level(ni, __NMBM_LOG_MAX);
368
369 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
370 nmbm_write_phys_page(ni, addr + off, ni->page_cache,
371 ni->page_cache + ni->lower.writesize,
372 NMBM_MODE_RAW);
373 }
374
375 nmbm_set_log_level(ni, level);
376
377 return 0;
378}
379
380/*****************************************************************************/
381/* NMBM related functions */
382/*****************************************************************************/
383
384/*
385 * nmbm_check_header - Check whether a NMBM structure is valid
386 * @data: pointer to a NMBM structure with a NMBM header at beginning
387 * @size: Size of the buffer pointed to by @data
388 *
389 * The size of the NMBM structure may be larger than the NMBM header,
390 * e.g. including the block mapping table and block state table.
391 */
392static bool nmbm_check_header(const void *data, uint32_t size)
393{
394 const struct nmbm_header *header = data;
395 struct nmbm_header nhdr;
396 uint32_t new_checksum;
397
398 /*
399 * Make sure expected structure size is equal or smaller than
400 * buffer size.
401 */
402 if (header->size > size)
403 return false;
404
405 memcpy(&nhdr, data, sizeof(nhdr));
406
407 nhdr.checksum = 0;
408 new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr));
409 if (header->size > sizeof(nhdr))
410 new_checksum = nmbm_crc32(new_checksum,
411 (const uint8_t *)data + sizeof(nhdr),
412 header->size - sizeof(nhdr));
413
414 if (header->checksum != new_checksum)
415 return false;
416
417 return true;
418}
419
420/*
421 * nmbm_update_checksum - Update checksum of a NMBM structure
422 * @header: pointer to a NMBM structure with a NMBM header at beginning
423 *
424 * The size of the NMBM structure must be specified by @header->size
425 */
426static void nmbm_update_checksum(struct nmbm_header *header)
427{
428 header->checksum = 0;
429 header->checksum = nmbm_crc32(0, header, header->size);
430}
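/*
 * nmbm_check_header() and nmbm_update_checksum() are symmetric: the CRC32 is
 * always computed over header->size bytes with the checksum field itself
 * treated as zero. A structure written right after nmbm_update_checksum()
 * therefore passes nmbm_check_header(), while a corrupted stored copy is
 * rejected by the checksum comparison.
 */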
431
432/*
433 * nmbm_get_spare_block_count - Calculate the number of blocks to be reserved
434 * @block_count: number of blocks of data
435 *
436 * Calculate the number of spare blocks that should be reserved for the data
437 */
438static uint32_t nmbm_get_spare_block_count(uint32_t block_count)
439{
440 uint32_t val;
441
442 val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV;
443 val *= NMBM_SPARE_BLOCK_MULTI;
444
445 if (val < NMBM_SPARE_BLOCK_MIN)
446 val = NMBM_SPARE_BLOCK_MIN;
447
448 return val;
449}
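/*
 * Roughly one spare block is reserved per NMBM_SPARE_BLOCK_DIV blocks
 * (rounded to nearest), scaled by NMBM_SPARE_BLOCK_MULTI and never below
 * NMBM_SPARE_BLOCK_MIN. For instance, if NMBM_SPARE_BLOCK_DIV were 8 and
 * NMBM_SPARE_BLOCK_MULTI were 1, 100 blocks of data would get
 * (100 + 4) / 8 * 1 = 13 spare blocks.
 */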
450
451/*
452 * nmbm_get_block_state_raw - Get state of a block from raw block state table
453 * @block_state: pointer to raw block state table (bitmap)
454 * @ba: block address
455 */
456static uint32_t nmbm_get_block_state_raw(nmbm_bitmap_t *block_state,
457 uint32_t ba)
458{
459 uint32_t unit, shift;
460
461 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
462 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
463
464 return (block_state[unit] >> shift) & BLOCK_ST_MASK;
465}
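/*
 * The state table is a packed bitmap: each block takes
 * NMBM_BITMAP_BITS_PER_BLOCK bits, and one nmbm_bitmap_t unit holds
 * NMBM_BITMAP_BLOCKS_PER_UNIT blocks. For example, assuming 2 bits per
 * block and 16 blocks per unit, block 21 lives in unit 21 / 16 == 1 at
 * shift (21 % 16) * 2 == 10, so its state is
 * (block_state[1] >> 10) & BLOCK_ST_MASK.
 */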
466
467/*
468 * nmbm_get_block_state - Get state of a block from block state table
469 * @ni: NMBM instance structure
470 * @ba: block address
471 */
472static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba)
473{
474 return nmbm_get_block_state_raw(ni->block_state, ba);
475}
476
477/*
478 * nmbm_set_block_state - Set state of a block to block state table
479 * @ni: NMBM instance structure
480 * @ba: block address
481 * @state: block state
482 *
483 * Set state of a block. If the block state changed, ni->block_state_changed
484 * will be increased.
485 */
486static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba,
487 uint32_t state)
488{
489 uint32_t unit, shift, orig;
490 nmbm_bitmap_t uv;
491
492 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
493 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
494
495 orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK;
496 state &= BLOCK_ST_MASK;
497
498 uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift));
499 uv |= state << shift;
500 ni->block_state[unit] = uv;
501
502 if (state == BLOCK_ST_BAD)
503 nmbm_mark_block_color_bad(ni, ba);
504
505 if (orig != state) {
506 ni->block_state_changed++;
507 return true;
508 }
509
510 return false;
511}
512
513/*
514 * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
515 * @ni: NMBM instance structure
516 * @ba: start physical block address
517 * @nba: return physical block address after walk
518 * @count: number of good blocks to be skipped
519 * @limit: highest block address allowed for walking
520 *
521 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
522 * return the next good block address.
523 *
524 * If not enough good blocks are counted before @limit is reached, false is returned.
525 *
526 * If @count == 0, the nearest good block address will be returned.
527 * @limit is not counted in walking.
528 */
529static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba,
530 uint32_t *nba, uint32_t count,
531 uint32_t limit)
532{
533 int32_t nblock = count;
534
535 if (limit >= ni->block_count)
536 limit = ni->block_count - 1;
537
538 while (ba < limit) {
539 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
540 nblock--;
541
542 if (nblock < 0) {
543 *nba = ba;
544 return true;
545 }
546
547 ba++;
548 }
549
550 return false;
551}
552
553/*
554 * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
555 * @ni: NMBM instance structure
556 * @ba: start physical block address
557 * @nba: return physical block address after walk
558 * @count: number of good blocks to be skipped
559 * @limit: lowest block address allowed for walking
560 *
561 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
562 * return the next good block address.
563 *
564 * If not enough good blocks are counted before @limit is reached, false is returned.
565 *
566 * If @count == 0, the nearest good block address will be returned.
567 * @limit is not counted in walking.
568 */
569static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba,
570 uint32_t *nba, uint32_t count, uint32_t limit)
571{
572 int32_t nblock = count;
573
574 if (limit >= ni->block_count)
575 limit = ni->block_count - 1;
576
577 while (ba > limit) {
578 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
579 nblock--;
580
581 if (nblock < 0) {
582 *nba = ba;
583 return true;
584 }
585
586 ba--;
587 }
588
589 return false;
590}
591
592/*
593 * nmbm_block_walk - Skip specified number of good blocks from curr. block addr
594 * @ni: NMBM instance structure
595 * @ascending: whether to walk ascending
596 * @ba: start physical block address
597 * @nba: return physical block address after walk
598 * @count: number of good blocks to be skipped
599 * @limit: highest/lowest block address allowed for walking
600 *
601 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
602 * return the next good block address.
603 *
604 * If not enough good blocks are counted before @limit is reached, false is returned.
605 *
606 * If @count == 0, the nearest good block address will be returned.
607 * @limit can be set to negative if no limit required.
608 * @limit is not counted in walking.
609 */
610static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending,
611 uint32_t ba, uint32_t *nba, int32_t count,
612 int32_t limit)
613{
614 if (ascending)
615 return nmbm_block_walk_asc(ni, ba, nba, count, limit);
616
617 return nmbm_block_walk_desc(ni, ba, nba, count, limit);
618}
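/*
 * Worked example of the walker: starting from physical block 10 with blocks
 * 11 and 13 marked bad, nmbm_block_walk(ni, true, 10, &nba, 2, limit) counts
 * good blocks 10 and 12, skips the bad ones, and returns with nba == 14,
 * provided block 14 is good and lies below @limit.
 */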
619
620/*
621 * nmbm_scan_badblocks - Scan and record all bad blocks
622 * @ni: NMBM instance structure
623 *
624 * Scan the entire lower NAND chip and record all bad blocks in to block state
625 * table.
626 */
627static void nmbm_scan_badblocks(struct nmbm_instance *ni)
628{
629 uint32_t ba;
630
631 for (ba = 0; ba < ni->block_count; ba++) {
632 if (nmbm_check_bad_phys_block(ni, ba)) {
633 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
634 nlog_info(ni, "Bad block %u [0x%08llx]\n", ba,
635 ba2addr(ni, ba));
636 }
637 }
638}
639
640/*
641 * nmbm_build_mapping_table - Build initial block mapping table
642 * @ni: NMBM instance structure
643 *
644 * The initial mapping table will be compatible with the strategy used in
645 * factory production.
646 */
647static void nmbm_build_mapping_table(struct nmbm_instance *ni)
648{
649 uint32_t pb, lb;
650
651 for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) {
652 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
653 continue;
654
655 /* Always map to the next good block */
656 ni->block_mapping[lb++] = pb;
657 }
658
659 ni->data_block_count = lb;
660
661 /* Unusable/Management blocks */
662 for (pb = lb; pb < ni->block_count; pb++)
663 ni->block_mapping[pb] = -1;
664}
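/*
 * Example of the initial mapping: with mgmt_start_ba == 6 and physical block
 * 2 marked bad, the loop above yields block_mapping[] = { 0, 1, 3, 4, 5, ... }
 * and data_block_count == 5, and every entry from index 5 upwards is set
 * to -1 (unusable or reserved for management).
 */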
665
666/*
667 * nmbm_erase_block_and_check - Erase a block and check its usability
668 * @ni: NMBM instance structure
669 * @ba: block address to be erased
670 *
671 * Erase a block and check its usability
672 *
673 * Return true if the block is usable, false if erasure failure or the block
674 * has too many bitflips.
675 */
676static bool nmbm_erase_block_and_check(struct nmbm_instance *ni, uint32_t ba)
677{
678 uint64_t addr, off;
679 bool success;
680 int ret;
681
682 success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
683 if (!success)
684 return false;
685
686 if (!(ni->lower.flags & NMBM_F_EMPTY_PAGE_ECC_OK))
687 return true;
688
689 /* Check every page to make sure there aren't too many bitflips */
690
691 addr = ba2addr(ni, ba);
692
693 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
694 WATCHDOG_RESET();
695
696 ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
697 NMBM_MODE_PLACE_OOB);
698 if (ret == -EBADMSG) {
699 /*
700 * NMBM_F_EMPTY_PAGE_ECC_OK means the empty page is
701 * still protected by ECC. So reading pages with ECC
702 * enabled and -EBADMSG means there are too many
703 * bitflips that can't be recovered, and the block
704 * containing the page should be marked bad.
705 */
706 nlog_err(ni,
707 "Too many bitflips in empty page at 0x%llx\n",
708 addr + off);
709 return false;
710 }
711 }
712
713 return true;
714}
715
716/*
717 * nmbm_erase_range - Erase a range of blocks
718 * @ni: NMBM instance structure
719 * @ba: block address where the erasure will start
720 * @limit: top block address allowed for erasure
721 *
722 * Erase blocks within the specific range. Newly-found bad blocks will be
723 * marked.
724 *
725 * @limit is not counted into the allowed erasure address.
726 */
727static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba,
728 uint32_t limit)
729{
730 bool success;
731
732 while (ba < limit) {
733 WATCHDOG_RESET();
734
735 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
736 goto next_block;
737
738 /* Insurance to detect unexpected bad block marked by user */
739 if (nmbm_check_bad_phys_block(ni, ba)) {
740 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
741 goto next_block;
742 }
743
744 success = nmbm_erase_block_and_check(ni, ba);
745 if (success)
746 goto next_block;
747
748 nmbm_mark_phys_bad_block(ni, ba);
749 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
750
751 next_block:
752 ba++;
753 }
754}
755
756/*
757 * nmbm_write_repeated_data - Write critical data to a block with retry
758 * @ni: NMBM instance structure
759 * @ba: block address where the data will be written to
760 * @data: the data to be written
761 * @size: size of the data
762 *
763 * Write data to every page of the block. Success only if all pages within
764 * this block have been successfully written.
765 *
766 * Make sure data size is not bigger than one page.
767 *
768 * This function will write and verify every page for at most
769 * NMBM_TRY_COUNT times.
770 */
771static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba,
772 const void *data, uint32_t size)
773{
774 uint64_t addr, off;
775 bool success;
776 int ret;
777
778 if (size > ni->lower.writesize)
779 return false;
780
781 addr = ba2addr(ni, ba);
782
783 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
784 WATCHDOG_RESET();
785
786 /* Prepare page data. fill 0xff to unused region */
787 memcpy(ni->page_cache, data, size);
788 memset(ni->page_cache + size, 0xff, ni->rawpage_size - size);
789
790 success = nmbm_write_phys_page(ni, addr + off, ni->page_cache,
791 NULL, NMBM_MODE_PLACE_OOB);
792 if (!success)
793 return false;
794
795 /* Verify the data just written. ECC error indicates failure */
796 ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
797 NMBM_MODE_PLACE_OOB);
798 if (ret < 0)
799 return false;
800
801 if (memcmp(ni->page_cache, data, size))
802 return false;
803 }
804
805 return true;
806}
807
808/*
809 * nmbm_write_signature - Write signature to NAND chip
810 * @ni: NMBM instance structure
811 * @limit: top block address allowed for writing
812 * @signature: the signature to be written
813 * @signature_ba: the actual block address where signature is written to
814 *
815 * Write signature within a specific range, from chip bottom to limit.
816 * At most one block will be written.
817 *
818 * @limit is not counted into the allowed write address.
819 */
820static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit,
821 const struct nmbm_signature *signature,
822 uint32_t *signature_ba)
823{
824 uint32_t ba = ni->block_count - 1;
825 bool success;
826
827 while (ba > limit) {
828 WATCHDOG_RESET();
829
830 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
831 goto next_block;
832
833 /* Insurance to detect unexpected bad block marked by user */
834 if (nmbm_check_bad_phys_block(ni, ba)) {
835 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
836 goto next_block;
837 }
838
839 success = nmbm_erase_block_and_check(ni, ba);
840 if (!success)
841 goto skip_bad_block;
842
843 success = nmbm_write_repeated_data(ni, ba, signature,
844 sizeof(*signature));
845 if (success) {
846 *signature_ba = ba;
847 return true;
848 }
849
850 skip_bad_block:
851 nmbm_mark_phys_bad_block(ni, ba);
852 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
853
854 next_block:
855 ba--;
856 };
857
858 return false;
859}
860
861/*
862 * nmbn_read_data - Read data
863 * @ni: NMBM instance structure
864 * @addr: linear address where the data will be read from
865 * @data: the data to be read
866 * @size: the size of data
867 *
868 * Read data range.
869 * Every page will be tried for at most NMBM_TRY_COUNT times.
870 *
871 * Return 0 for success, positive value for corrected bitflip count,
872 * -EBADMSG for ecc error, other negative values for other errors
873 */
874static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data,
875 uint32_t size)
876{
877 uint64_t off = addr;
878 uint8_t *ptr = data;
879 uint32_t sizeremain = size, chunksize, leading;
880 int ret;
881
882 while (sizeremain) {
883 WATCHDOG_RESET();
884
885 leading = off & ni->writesize_mask;
886 chunksize = ni->lower.writesize - leading;
887 if (chunksize > sizeremain)
888 chunksize = sizeremain;
889
890 if (chunksize == ni->lower.writesize) {
891 ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL,
892 NMBM_MODE_PLACE_OOB);
893 if (ret < 0)
894 return ret;
895 } else {
896 ret = nmbm_read_phys_page(ni, off - leading,
897 ni->page_cache, NULL,
898 NMBM_MODE_PLACE_OOB);
899 if (ret < 0)
900 return ret;
901
902 memcpy(ptr, ni->page_cache + leading, chunksize);
903 }
904
905 off += chunksize;
906 ptr += chunksize;
907 sizeremain -= chunksize;
908 }
909
910 return 0;
911}
912
913/*
914 * nmbn_write_verify_data - Write data with validation
915 * @ni: NMBM instance structure
916 * @addr: linear address where the data will be written to
917 * @data: the data to be written
918 * @size: the size of data
919 *
920 * Write data and verify.
921 * Every page will be tried for at most NMBM_TRY_COUNT times.
922 */
923static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr,
924 const void *data, uint32_t size)
925{
926 uint64_t off = addr;
927 const uint8_t *ptr = data;
928 uint32_t sizeremain = size, chunksize, leading;
929 bool success;
930 int ret;
931
932 while (sizeremain) {
933 WATCHDOG_RESET();
934
935 leading = off & ni->writesize_mask;
936 chunksize = ni->lower.writesize - leading;
937 if (chunksize > sizeremain)
938 chunksize = sizeremain;
939
940 /* Prepare page data. fill 0xff to unused region */
941 memset(ni->page_cache, 0xff, ni->rawpage_size);
942 memcpy(ni->page_cache + leading, ptr, chunksize);
943
944 success = nmbm_write_phys_page(ni, off - leading,
945 ni->page_cache, NULL,
946 NMBM_MODE_PLACE_OOB);
947 if (!success)
948 return false;
949
950 /* Verify the data just written. ECC error indicates failure */
951 ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache,
952 NULL, NMBM_MODE_PLACE_OOB);
953 if (ret < 0)
954 return false;
955
956 if (memcmp(ni->page_cache + leading, ptr, chunksize))
957 return false;
958
959 off += chunksize;
960 ptr += chunksize;
961 sizeremain -= chunksize;
962 }
963
964 return true;
965}
966
967/*
968 * nmbm_write_mgmt_range - Write management data into NAND within a range
969 * @ni: NMBM instance structure
970 * @addr: preferred start block address for writing
971 * @limit: highest block address allowed for writing
972 * @data: the data to be written
973 * @size: the size of data
974 * @actual_start_ba: actual start block address of data
975 * @actual_end_ba: block address after the end of data
976 *
977 * @limit is not counted into the allowed write address.
978 */
979static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba,
980 uint32_t limit, const void *data,
981 uint32_t size, uint32_t *actual_start_ba,
982 uint32_t *actual_end_ba)
983{
984 const uint8_t *ptr = data;
985 uint32_t sizeremain = size, chunksize;
986 bool success;
987
988 while (sizeremain && ba < limit) {
989 WATCHDOG_RESET();
990
991 chunksize = sizeremain;
992 if (chunksize > ni->lower.erasesize)
993 chunksize = ni->lower.erasesize;
994
995 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
996 goto next_block;
997
998 /* Insurance to detect unexpected bad block marked by user */
999 if (nmbm_check_bad_phys_block(ni, ba)) {
1000 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1001 goto next_block;
1002 }
1003
1004 success = nmbm_erase_block_and_check(ni, ba);
1005 if (!success)
1006 goto skip_bad_block;
1007
1008 success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr,
1009 chunksize);
1010 if (!success)
1011 goto skip_bad_block;
1012
1013 if (sizeremain == size)
1014 *actual_start_ba = ba;
1015
1016 ptr += chunksize;
1017 sizeremain -= chunksize;
1018
1019 goto next_block;
1020
1021 skip_bad_block:
1022 nmbm_mark_phys_bad_block(ni, ba);
1023 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1024
1025 next_block:
1026 ba++;
1027 }
1028
1029 if (sizeremain)
1030 return false;
1031
1032 *actual_end_ba = ba;
1033
1034 return true;
1035}
1036
1037/*
1038 * nmbm_generate_info_table_cache - Generate info table cache data
1039 * @ni: NMBM instance structure
1040 *
1041 * Generate info table cache data to be written into flash.
1042 */
1043static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni)
1044{
1045 bool changed = false;
1046
1047 memset(ni->info_table_cache, 0xff, ni->info_table_size);
1048
1049 memcpy(ni->info_table_cache + ni->info_table.state_table_off,
1050 ni->block_state, ni->state_table_size);
1051
1052 memcpy(ni->info_table_cache + ni->info_table.mapping_table_off,
1053 ni->block_mapping, ni->mapping_table_size);
1054
1055 ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE;
1056 ni->info_table.header.version = NMBM_VER;
1057 ni->info_table.header.size = ni->info_table_size;
1058
1059 if (ni->block_state_changed || ni->block_mapping_changed) {
1060 ni->info_table.write_count++;
1061 changed = true;
1062 }
1063
1064 memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table));
1065
1066 nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache);
1067
1068 return changed;
1069}
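/*
 * Layout of the cached info table image built above: the
 * nmbm_info_table_header sits at offset 0, the block state bitmap is copied
 * to state_table_off, the block mapping array to mapping_table_off, and any
 * remaining bytes of info_table_size are left as 0xff so they program
 * cleanly to NAND. write_count only advances when either table actually
 * changed.
 */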
1070
1071/*
1072 * nmbm_write_info_table - Write info table into NAND within a range
1073 * @ni: NMBM instance structure
1074 * @ba: preferred start block address for writing
1075 * @limit: highest block address allowed for writing
1076 * @actual_start_ba: actual start block address of info table
1077 * @actual_end_ba: block address after the end of info table
1078 *
1079 * @limit is counted into the allowed write address.
1080 */
1081static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba,
1082 uint32_t limit, uint32_t *actual_start_ba,
1083 uint32_t *actual_end_ba)
1084{
1085 return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache,
1086 ni->info_table_size, actual_start_ba,
1087 actual_end_ba);
1088}
1089
1090/*
1091 * nmbm_mark_tables_clean - Mark info table `clean'
1092 * @ni: NMBM instance structure
1093 */
1094static void nmbm_mark_tables_clean(struct nmbm_instance *ni)
1095{
1096 ni->block_state_changed = 0;
1097 ni->block_mapping_changed = 0;
1098}
1099
1100/*
1101 * nmbm_try_reserve_blocks - Reserve blocks, falling back to fewer if needed
1102 * @ni: NMBM instance structure
1103 * @ba: start physical block address
1104 * @nba: return physical block address after reservation
1105 * @count: number of good blocks to be skipped
1106 * @min_count: minimum number of good blocks to be skipped
1107 * @limit: highest/lowest block address allowed for walking
1108 *
1109 * Reserve the requested number of blocks. If that fails, reserve as many as possible.
1110 */
1111static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba,
1112 uint32_t *nba, uint32_t count,
1113 int32_t min_count, int32_t limit)
1114{
1115 int32_t nblocks = count;
1116 bool success;
1117
1118 while (nblocks >= min_count) {
1119 success = nmbm_block_walk(ni, true, ba, nba, nblocks, limit);
1120 if (success)
1121 return true;
1122
1123 nblocks--;
1124 }
1125
1126 return false;
1127}
1128
1129/*
1130 * nmbm_rebuild_info_table - Build main & backup info table from scratch
1131 * @ni: NMBM instance structure
1132 * Spare blocks are reserved between the two tables when possible.
1133 */
1134static bool nmbm_rebuild_info_table(struct nmbm_instance *ni)
1135{
1136 uint32_t table_start_ba, table_end_ba, next_start_ba;
1137 uint32_t main_table_end_ba;
1138 bool success;
1139
1140 /* Set initial value */
1141 ni->main_table_ba = 0;
1142 ni->backup_table_ba = 0;
1143 ni->mapping_blocks_ba = ni->mapping_blocks_top_ba;
1144
1145 /* Write main table */
1146 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1147 ni->mapping_blocks_top_ba,
1148 &table_start_ba, &table_end_ba);
1149 if (!success) {
1150 /* Failed to write main table, data will be lost */
1151 nlog_emerg(ni, "Unable to write at least one info table!\n");
1152 nlog_emerg(ni, "Please save your data before power off!\n");
1153 ni->protected = 1;
1154 return false;
1155 }
1156
1157 /* Main info table is successfully written, record its offset */
1158 ni->main_table_ba = table_start_ba;
1159 main_table_end_ba = table_end_ba;
1160
1161 /* Adjust mapping_blocks_ba */
1162 ni->mapping_blocks_ba = table_end_ba;
1163
1164 nmbm_mark_tables_clean(ni);
1165
1166 nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1167
1168 /* Reserve spare blocks for main info table. */
1169 success = nmbm_try_reserve_blocks(ni, table_end_ba,
1170 &next_start_ba,
1171 ni->info_table_spare_blocks, 0,
1172 ni->mapping_blocks_top_ba -
1173 size2blk(ni, ni->info_table_size));
1174 if (!success) {
1175 /* There is no spare block. */
1176 nlog_debug(ni, "No room for backup info table\n");
1177 return true;
1178 }
1179
1180 /* Write backup info table. */
1181 success = nmbm_write_info_table(ni, next_start_ba,
1182 ni->mapping_blocks_top_ba,
1183 &table_start_ba, &table_end_ba);
1184 if (!success) {
1185 /* There are not enough blocks for the backup table. */
1186 nlog_debug(ni, "No room for backup info table\n");
1187 return true;
1188 }
1189
1190 /* Backup table is successfully written, record its offset */
1191 ni->backup_table_ba = table_start_ba;
1192
1193 /* Adjust mapping_blocks_off */
1194 ni->mapping_blocks_ba = table_end_ba;
1195
1196 /* Erase spare blocks of main table to clean possible interference data */
1197 nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
1198
1199 nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1200
1201 return true;
1202}
1203
1204/*
1205 * nmbm_rescue_single_info_table - Rescue when there is only one info table
1206 * @ni: NMBM instance structure
1207 *
1208 * This function is called when only one info table exists.
1209 * This function may fail if a new info table cannot be written.
1210 */
1211static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni)
1212{
1213 uint32_t table_start_ba, table_end_ba, write_ba;
1214 bool success;
1215
1216 /* Try to write new info table in front of existing table */
1217 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1218 ni->main_table_ba,
1219 &table_start_ba,
1220 &table_end_ba);
1221 if (success) {
1222 /*
1223 * New table becomes the main table, existing table becomes
1224 * the backup table.
1225 */
1226 ni->backup_table_ba = ni->main_table_ba;
1227 ni->main_table_ba = table_start_ba;
1228
1229 nmbm_mark_tables_clean(ni);
1230
1231 /* Erase spare blocks of main table to clean possible interference data */
1232 nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba);
1233
1234 nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1235
1236 return true;
1237 }
1238
1239 /* Try to reserve spare blocks for existing table */
1240 success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1241 ni->info_table_spare_blocks, 0,
1242 ni->mapping_blocks_top_ba -
1243 size2blk(ni, ni->info_table_size));
1244 if (!success) {
1245 nlog_warn(ni, "Failed to rescue single info table\n");
1246 return false;
1247 }
1248
1249 /* Try to write new info table next to the existing table */
1250 while (write_ba >= ni->mapping_blocks_ba) {
1251 WATCHDOG_RESET();
1252
1253 success = nmbm_write_info_table(ni, write_ba,
1254 ni->mapping_blocks_top_ba,
1255 &table_start_ba,
1256 &table_end_ba);
1257 if (success)
1258 break;
1259
1260 write_ba--;
1261 }
1262
1263 if (success) {
1264 /* Erase spare blocks of main table to clean possible interference data */
1265 nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba);
1266
1267 /* New table becomes the backup table */
1268 ni->backup_table_ba = table_start_ba;
1269 ni->mapping_blocks_ba = table_end_ba;
1270
1271 nmbm_mark_tables_clean(ni);
1272
1273 nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1274
1275 return true;
1276 }
1277
1278 nlog_warn(ni, "Failed to rescue single info table\n");
1279 return false;
1280}
1281
1282/*
1283 * nmbm_update_single_info_table - Update specific one info table
1284 * @ni: NMBM instance structure
1285 */
1286static bool nmbm_update_single_info_table(struct nmbm_instance *ni,
1287 bool update_main_table)
1288{
1289 uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba;
1290 bool success;
1291
1292 /* Determine the write range */
1293 if (update_main_table) {
1294 write_start_ba = ni->main_table_ba;
1295 write_limit = ni->backup_table_ba;
1296 } else {
1297 write_start_ba = ni->backup_table_ba;
1298 write_limit = ni->mapping_blocks_top_ba;
1299 }
1300
1301 nmbm_mark_block_color_mgmt(ni, write_start_ba, write_limit - 1);
1302
1303 success = nmbm_write_info_table(ni, write_start_ba, write_limit,
1304 &table_start_ba, &table_end_ba);
1305 if (success) {
1306 if (update_main_table) {
1307 ni->main_table_ba = table_start_ba;
1308 } else {
1309 ni->backup_table_ba = table_start_ba;
1310 ni->mapping_blocks_ba = table_end_ba;
1311 }
1312
1313 nmbm_mark_tables_clean(ni);
1314
1315 nlog_table_update(ni, update_main_table, table_start_ba,
1316 table_end_ba);
1317
1318 return true;
1319 }
1320
1321 if (update_main_table) {
1322 /*
1323 * If failed to update main table, make backup table the new
1324 * main table, and call nmbm_rescue_single_info_table()
1325 */
1326 nlog_warn(ni, "Unable to update %s info table\n",
1327 update_main_table ? "Main" : "Backup");
1328
1329 ni->main_table_ba = ni->backup_table_ba;
1330 ni->backup_table_ba = 0;
1331 return nmbm_rescue_single_info_table(ni);
1332 }
1333
1334 /* Only one table left */
1335 ni->mapping_blocks_ba = ni->backup_table_ba;
1336 ni->backup_table_ba = 0;
1337
1338 return false;
1339}
1340
1341/*
1342 * nmbm_rescue_main_info_table - Rescue when failed to write main info table
1343 * @ni: NMBM instance structure
1344 *
1345 * This function is called when main info table failed to be written, and
1346 * backup info table exists.
1347 */
1348static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni)
1349{
1350 uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba;
1351 uint32_t main_table_end_ba, write_ba;
1352 uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size);
1353 bool success;
1354
1355 /* Try to reserve spare blocks for existing backup info table */
1356 success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1357 ni->info_table_spare_blocks, 0,
1358 ni->mapping_blocks_top_ba -
1359 info_table_erasesize);
1360 if (!success) {
1361 /* There is no spare block. Backup info table becomes the main table. */
1362 nlog_err(ni, "No room for temporary info table\n");
1363 ni->main_table_ba = ni->backup_table_ba;
1364 ni->backup_table_ba = 0;
1365 return true;
1366 }
1367
1368 /* Try to write temporary info table into spare unmapped blocks */
1369 while (write_ba >= ni->mapping_blocks_ba) {
1370 WATCHDOG_RESET();
1371
1372 success = nmbm_write_info_table(ni, write_ba,
1373 ni->mapping_blocks_top_ba,
1374 &tmp_table_start_ba,
1375 &tmp_table_end_ba);
1376 if (success)
1377 break;
1378
1379 write_ba--;
1380 }
1381
1382 if (!success) {
1383 /* Backup info table becomes the main table */
1384 nlog_err(ni, "Failed to update main info table\n");
1385 ni->main_table_ba = ni->backup_table_ba;
1386 ni->backup_table_ba = 0;
1387 return true;
1388 }
1389
1390 /* Adjust mapping_blocks_off */
1391 ni->mapping_blocks_ba = tmp_table_end_ba;
1392
1393 nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1394 tmp_table_end_ba - 1);
1395
1396 /*
1397 * Now write main info table at the beginning of management area.
1398 * This operation will generally destroy the original backup info
1399 * table.
1400 */
1401 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1402 tmp_table_start_ba,
1403 &main_table_start_ba,
1404 &main_table_end_ba);
1405 if (!success) {
1406 /* Temporary info table becomes the main table */
1407 ni->main_table_ba = tmp_table_start_ba;
1408 ni->backup_table_ba = 0;
1409
1410 nmbm_mark_tables_clean(ni);
1411
1412 nlog_err(ni, "Failed to update main info table\n");
1413 nmbm_mark_block_color_info_table(ni, tmp_table_start_ba,
1414 tmp_table_end_ba - 1);
1415
1416 return true;
1417 }
1418
1419 /* Main info table has been successfully written, record its offset */
1420 ni->main_table_ba = main_table_start_ba;
1421
1422 nmbm_mark_tables_clean(ni);
1423
1424 nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba);
1425
1426 /*
1427 * Temporary info table becomes the new backup info table if it's
1428 * not overwritten.
1429 */
1430 if (main_table_end_ba <= tmp_table_start_ba) {
1431 ni->backup_table_ba = tmp_table_start_ba;
1432
1433 nlog_table_creation(ni, false, tmp_table_start_ba,
1434 tmp_table_end_ba);
1435
1436 return true;
1437 }
1438
1439 /* Adjust mapping_blocks_off */
1440 ni->mapping_blocks_ba = main_table_end_ba;
1441
1442 /* Try to reserve spare blocks for new main info table */
1443 success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba,
1444 ni->info_table_spare_blocks, 0,
1445 ni->mapping_blocks_top_ba -
1446 info_table_erasesize);
1447 if (!success) {
1448 /* There is no spare block. Only main table exists. */
1449 nlog_err(ni, "No room for backup info table\n");
1450 ni->backup_table_ba = 0;
1451 return true;
1452 }
1453
1454 /* Write new backup info table. */
1455 while (write_ba >= main_table_end_ba) {
1456 WATCHDOG_RESET();
1457
1458 success = nmbm_write_info_table(ni, write_ba,
1459 ni->mapping_blocks_top_ba,
1460 &tmp_table_start_ba,
1461 &tmp_table_end_ba);
1462 if (success)
1463 break;
1464
1465 write_ba--;
1466 }
1467
1468 if (!success) {
1469 nlog_err(ni, "No room for backup info table\n");
1470 ni->backup_table_ba = 0;
1471 return true;
1472 }
1473
1474 /* Backup info table has been successfully written, record its offset */
1475 ni->backup_table_ba = tmp_table_start_ba;
1476
1477 /* Adjust mapping_blocks_off */
1478 ni->mapping_blocks_ba = tmp_table_end_ba;
1479
1480 /* Erase spare blocks of main table to clean possible interference data */
1481 nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
1482
1483 nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba);
1484
1485 return true;
1486}
1487
1488/*
1489 * nmbm_update_info_table_once - Update info table once
1490 * @ni: NMBM instance structure
1491 * @force: force update
1492 *
1493 * Update both main and backup info table. Return true if at least one info
1494 * table has been successfully written.
1495 * This function only tries to update the info table once, regardless of the result.
1496 */
1497static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force)
1498{
1499 uint32_t table_start_ba, table_end_ba;
1500 uint32_t main_table_limit;
1501 bool success;
1502
1503 /* Do nothing if there is no change */
1504 if (!nmbm_generate_info_table_cache(ni) && !force)
1505 return true;
1506
1507 /* Check whether both two tables exist */
1508 if (!ni->backup_table_ba) {
1509 main_table_limit = ni->mapping_blocks_top_ba;
1510 goto write_main_table;
1511 }
1512
1513 nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1514 ni->mapping_blocks_ba - 1);
1515
1516 /*
1517 * Write backup info table in its current range.
1518 * Note that limit is set to mapping_blocks_top_off to provide as many
1519 * spare blocks as possible for the backup table. If, in the end,
1520 * unmapped blocks are used by the backup table, mapping_blocks_off will
1521 * be adjusted.
1522 */
1523 success = nmbm_write_info_table(ni, ni->backup_table_ba,
1524 ni->mapping_blocks_top_ba,
1525 &table_start_ba, &table_end_ba);
1526 if (!success) {
1527 /*
1528 * There is nothing to do if failed to write backup table.
1529 * Write the main table now.
1530 */
1531 nlog_err(ni, "No room for backup table\n");
1532 ni->mapping_blocks_ba = ni->backup_table_ba;
1533 ni->backup_table_ba = 0;
1534 main_table_limit = ni->mapping_blocks_top_ba;
1535 goto write_main_table;
1536 }
1537
1538 /* Backup table is successfully written, record its offset */
1539 ni->backup_table_ba = table_start_ba;
1540
1541 /* Adjust mapping_blocks_off */
1542 ni->mapping_blocks_ba = table_end_ba;
1543
1544 nmbm_mark_tables_clean(ni);
1545
1546 /* The normal limit of main table */
1547 main_table_limit = ni->backup_table_ba;
1548
1549 nlog_table_update(ni, false, table_start_ba, table_end_ba);
1550
1551write_main_table:
1552 if (!ni->main_table_ba)
1553 goto rebuild_tables;
1554
1555 if (!ni->backup_table_ba)
1556 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1557 ni->mapping_blocks_ba - 1);
1558 else
1559 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1560 ni->backup_table_ba - 1);
1561
1562 /* Write main info table in its current range */
1563 success = nmbm_write_info_table(ni, ni->main_table_ba,
1564 main_table_limit, &table_start_ba,
1565 &table_end_ba);
1566 if (!success) {
1567 /* If failed to write main table, go rescue procedure */
1568 if (!ni->backup_table_ba)
1569 goto rebuild_tables;
1570
1571 return nmbm_rescue_main_info_table(ni);
1572 }
1573
1574 /* Main info table is successfully written, record its offset */
1575 ni->main_table_ba = table_start_ba;
1576
1577 /* Adjust mapping_blocks_off */
1578 if (!ni->backup_table_ba)
1579 ni->mapping_blocks_ba = table_end_ba;
1580
1581 nmbm_mark_tables_clean(ni);
1582
1583 nlog_table_update(ni, true, table_start_ba, table_end_ba);
1584
1585 return true;
1586
1587rebuild_tables:
1588 return nmbm_rebuild_info_table(ni);
1589}
1590
1591/*
1592 * nmbm_update_info_table - Update info table
1593 * @ni: NMBM instance structure
1594 *
1595 * Update both main and backup info table. Return true if at least one table
1596 * has been successfully written.
1597 * This function will try to update info table repeatedly until no new bad
1598 * block found during updating.
1599 */
1600static bool nmbm_update_info_table(struct nmbm_instance *ni)
1601{
1602 bool success;
1603
1604 if (ni->protected)
1605 return true;
1606
1607 while (ni->block_state_changed || ni->block_mapping_changed) {
1608 success = nmbm_update_info_table_once(ni, false);
1609 if (!success) {
1610 nlog_err(ni, "Failed to update info table\n");
1611 return false;
1612 }
1613 }
1614
1615 return true;
1616}
1617
1618/*
1619 * nmbm_map_block - Map a bad block to an unused spare block
1620 * @ni: NMBM instance structure
1621 * @lb: logical block address to map
1622 */
1623static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb)
1624{
1625 uint32_t pb;
1626 bool success;
1627
1628 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
1629 nlog_warn(ni, "No spare unmapped blocks.\n");
1630 return false;
1631 }
1632
1633 success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0,
1634 ni->mapping_blocks_ba);
1635 if (!success) {
1636 nlog_warn(ni, "No spare unmapped blocks.\n");
1637 nmbm_update_info_table(ni);
1638 ni->mapping_blocks_top_ba = ni->mapping_blocks_ba;
1639 return false;
1640 }
1641
1642 ni->block_mapping[lb] = pb;
1643 ni->mapping_blocks_top_ba--;
1644 ni->block_mapping_changed++;
1645
1646 nlog_info(ni, "Logic block %u mapped to physical blcok %u\n", lb, pb);
1647 nmbm_mark_block_color_mapped(ni, pb);
1648
1649 return true;
1650}
1651
1652/*
1653 * nmbm_create_info_table - Create info table(s)
1654 * @ni: NMBM instance structure
1655 *
1656 * This function assumes that the chip has no existing info table(s)
1657 */
1658static bool nmbm_create_info_table(struct nmbm_instance *ni)
1659{
1660 uint32_t lb;
1661 bool success;
1662
1663 /* Set initial mapping_blocks_top_off */
1664 success = nmbm_block_walk(ni, false, ni->signature_ba,
1665 &ni->mapping_blocks_top_ba, 1,
1666 ni->mgmt_start_ba);
1667 if (!success) {
1668 nlog_err(ni, "No room for spare blocks\n");
1669 return false;
1670 }
1671
1672 /* Generate info table cache */
1673 nmbm_generate_info_table_cache(ni);
1674
1675 /* Write info table */
1676 success = nmbm_rebuild_info_table(ni);
1677 if (!success) {
1678 nlog_err(ni, "Failed to build info tables\n");
1679 return false;
1680 }
1681
1682 /* Remap bad block(s) at end of data area */
1683 for (lb = ni->data_block_count; lb < ni->mgmt_start_ba; lb++) {
1684 success = nmbm_map_block(ni, lb);
1685 if (!success)
1686 break;
1687
1688 ni->data_block_count++;
1689 }
1690
1691 /* If state table and/or mapping table changed, update info table. */
1692 success = nmbm_update_info_table(ni);
1693 if (!success)
1694 return false;
1695
1696 return true;
1697}
1698
1699/*
1700 * nmbm_create_new - Create NMBM on a new chip
1701 * @ni: NMBM instance structure
1702 */
1703static bool nmbm_create_new(struct nmbm_instance *ni)
1704{
1705 bool success;
1706
1707 /* Determine the boundary of management blocks */
1708 ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->lower.max_ratio) / NMBM_MGMT_DIV;
1709
1710 if (ni->lower.max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->lower.max_reserved_blocks)
1711 ni->mgmt_start_ba = ni->block_count - ni->lower.max_reserved_blocks;
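/*
 * Worked example (assuming NMBM_MGMT_DIV == 16): with max_ratio == 1 a
 * 1024-block chip gets mgmt_start_ba = 1024 * (16 - 1) / 16 == 960, i.e.
 * the last 1/16 of the chip is reserved for management, unless
 * max_reserved_blocks above caps it to a smaller region.
 */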
1712
1713 nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n",
1714 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
1715 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba, ni->block_count - 1);
1716
1717 /* Fill block state table & mapping table */
1718 nmbm_scan_badblocks(ni);
1719 nmbm_build_mapping_table(ni);
1720
1721 /* Write signature */
1722 ni->signature.header.magic = NMBM_MAGIC_SIGNATURE;
1723 ni->signature.header.version = NMBM_VER;
1724 ni->signature.header.size = sizeof(ni->signature);
1725 ni->signature.nand_size = ni->lower.size;
1726 ni->signature.block_size = ni->lower.erasesize;
1727 ni->signature.page_size = ni->lower.writesize;
1728 ni->signature.spare_size = ni->lower.oobsize;
1729 ni->signature.mgmt_start_pb = ni->mgmt_start_ba;
1730 ni->signature.max_try_count = NMBM_TRY_COUNT;
1731 nmbm_update_checksum(&ni->signature.header);
1732
1733 if (ni->lower.flags & NMBM_F_READ_ONLY) {
1734 nlog_info(ni, "NMBM has been initialized in read-only mode\n");
1735 return true;
1736 }
1737
1738 success = nmbm_write_signature(ni, ni->mgmt_start_ba,
1739 &ni->signature, &ni->signature_ba);
1740 if (!success) {
1741 nlog_err(ni, "Failed to write signature to a proper offset\n");
1742 return false;
1743 }
1744
1745 nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n",
1746 ni->signature_ba, ba2addr(ni, ni->signature_ba));
1747 nmbm_mark_block_color_signature(ni, ni->signature_ba);
1748
1749 /* Write info table(s) */
1750 success = nmbm_create_info_table(ni);
1751 if (success) {
1752 nlog_info(ni, "NMBM has been successfully created\n");
1753 return true;
1754 }
1755
1756 return false;
1757}
1758
1759/*
1760 * nmbm_check_info_table_header - Check if an info table header is valid
1761 * @ni: NMBM instance structure
1762 * @data: pointer to the info table header
1763 */
1764static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data)
1765{
1766 struct nmbm_info_table_header *ifthdr = data;
1767
1768 if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE)
1769 return false;
1770
1771 if (ifthdr->header.size != ni->info_table_size)
1772 return false;
1773
1774 if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size)
1775 return false;
1776
1777 if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size)
1778 return false;
1779
1780 return true;
1781}
1782
1783/*
1784 * nmbm_check_info_table - Check if a whole info table is valid
1785 * @ni: NMBM instance structure
1786 * @start_ba: start block address of this table
1787 * @end_ba: end block address of this table
1788 * @data: pointer to the info table header
1789 * @mapping_blocks_top_ba: return the block address of top remapped block
1790 */
1791static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba,
1792 uint32_t end_ba, void *data,
1793 uint32_t *mapping_blocks_top_ba)
1794{
1795 struct nmbm_info_table_header *ifthdr = data;
1796 int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off);
1797 nmbm_bitmap_t *block_state = (nmbm_bitmap_t *)((uintptr_t)data + ifthdr->state_table_off);
1798 uint32_t minimum_mapping_pb = ni->signature_ba;
1799 uint32_t ba;
1800
1801 for (ba = 0; ba < ni->data_block_count; ba++) {
1802 if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) ||
1803 block_mapping[ba] == ni->signature_ba)
1804 return false;
1805
1806 if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb)
1807 minimum_mapping_pb = block_mapping[ba];
1808 }
1809
1810 for (ba = start_ba; ba < end_ba; ba++) {
1811 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1812 continue;
1813
1814 if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD)
1815 return false;
1816 }
1817
1818 *mapping_blocks_top_ba = minimum_mapping_pb - 1;
1819
1820 return true;
1821}
1822
1823/*
1824 * nmbm_try_load_info_table - Try to load info table from an address
1825 * @ni: NMBM instance structure
1826 * @ba: start block address of the info table
1827 * @eba: return the block address after end of the table
1828 * @write_count: return the write count of this table
1829 * @mapping_blocks_top_ba: return the block address of top remapped block
1830 * @table_loaded: used to record whether ni->info_table has valid data
1831 */
1832static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba,
1833 uint32_t *eba, uint32_t *write_count,
1834 uint32_t *mapping_blocks_top_ba,
1835 bool table_loaded)
1836{
1837 struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache;
1838 uint8_t *off = ni->info_table_cache;
1839 uint32_t limit = ba + size2blk(ni, ni->info_table_size);
1840 uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size;
1841 bool success, checkhdr = true;
1842 int ret;
1843
1844 while (sizeremain && ba < limit) {
1845 WATCHDOG_RESET();
1846
1847 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1848 goto next_block;
1849
1850 if (nmbm_check_bad_phys_block(ni, ba)) {
1851 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1852 goto next_block;
1853 }
1854
1855 chunksize = sizeremain;
1856 if (chunksize > ni->lower.erasesize)
1857 chunksize = ni->lower.erasesize;
1858
1859 /* Assume block with ECC error has no info table data */
1860 ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize);
1861 if (ret < 0)
1862 goto skip_bad_block;
1863 else if (ret > 0)
1864 return false;
1865
1866 if (checkhdr) {
1867 success = nmbm_check_info_table_header(ni, off);
1868 if (!success)
1869 return false;
1870
1871 start_ba = ba;
1872 checkhdr = false;
1873 }
1874
1875 off += chunksize;
1876 sizeremain -= chunksize;
1877
1878 goto next_block;
1879
1880 skip_bad_block:
1881 /* Only mark bad in memory */
1882 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1883
1884 next_block:
1885 ba++;
1886 }
1887
1888 if (sizeremain)
1889 return false;
1890
1891 success = nmbm_check_header(ni->info_table_cache, ni->info_table_size);
1892 if (!success)
1893 return false;
1894
1895 *eba = ba;
1896 *write_count = ifthdr->write_count;
1897
1898 success = nmbm_check_info_table(ni, start_ba, ba, ni->info_table_cache,
1899 mapping_blocks_top_ba);
1900 if (!success)
1901 return false;
1902
1903 if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) {
1904 memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table));
1905 memcpy(ni->block_state,
1906 (uint8_t *)ifthdr + ifthdr->state_table_off,
1907 ni->state_table_size);
1908 memcpy(ni->block_mapping,
1909 (uint8_t *)ifthdr + ifthdr->mapping_table_off,
1910 ni->mapping_table_size);
1911 ni->info_table.write_count = ifthdr->write_count;
1912 }
1913
1914 return true;
1915}
1916
1917/*
1918 * nmbm_search_info_table - Search info table from specific address
1919 * @ni: NMBM instance structure
1920 * @ba: start block address to search
1921 * @limit: highest block address allowed for searching
1922 * @table_start_ba: return the start block address of this table
1923 * @table_end_ba: return the block address after end of this table
1924 * @write_count: return the write count of this table
1925 * @mapping_blocks_top_ba: return the block address of top remapped block
1926 * @table_loaded: used to record whether ni->info_table has valid data
1927 */
1928static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba,
1929 uint32_t limit, uint32_t *table_start_ba,
1930 uint32_t *table_end_ba,
1931 uint32_t *write_count,
1932 uint32_t *mapping_blocks_top_ba,
1933 bool table_loaded)
1934{
1935 bool success;
1936
1937 while (ba < limit - size2blk(ni, ni->info_table_size)) {
1938 WATCHDOG_RESET();
1939
1940 success = nmbm_try_load_info_table(ni, ba, table_end_ba,
1941 write_count,
1942 mapping_blocks_top_ba,
1943 table_loaded);
1944 if (success) {
1945 *table_start_ba = ba;
1946 return true;
1947 }
1948
1949 ba++;
1950 }
1951
1952 return false;
1953}
1954
1955/*
1956 * nmbm_load_info_table - Load info table(s) from a chip
1957 * @ni: NMBM instance structure
1958 * @ba: start block address to search info table
1959 * @limit: highest block address allowed for searching
1960 */
1961static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba,
1962 uint32_t limit)
1963{
1964 uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba;
1965 uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba;
1966 uint32_t main_table_write_count, backup_table_write_count;
1967 uint32_t i;
1968 bool success;
1969
1970 /* Set initial value */
1971 ni->main_table_ba = 0;
1972 ni->backup_table_ba = 0;
1973 ni->info_table.write_count = 0;
1974 ni->mapping_blocks_top_ba = ni->signature_ba - 1;
1975 ni->data_block_count = ni->signature.mgmt_start_pb;
1976
1977 /* Find first info table */
1978 success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba,
1979 &main_table_end_ba, &main_table_write_count,
1980 &main_mapping_blocks_top_ba, false);
1981 if (!success) {
1982 nlog_warn(ni, "No valid info table found\n");
1983 return false;
1984 }
1985
1986 table_end_ba = main_table_end_ba;
1987
1988 nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba,
1989 main_table_end_ba);
1990
1991 /* Find second info table */
1992 success = nmbm_search_info_table(ni, main_table_end_ba, limit,
1993 &ni->backup_table_ba, &backup_table_end_ba,
1994 &backup_table_write_count, &backup_mapping_blocks_top_ba, true);
1995 if (!success) {
1996 nlog_warn(ni, "Second info table not found\n");
1997 } else {
1998 table_end_ba = backup_table_end_ba;
1999
2000 nlog_table_found(ni, false, backup_table_write_count,
2001 ni->backup_table_ba, backup_table_end_ba);
2002 }
2003
2004 /* Pick mapping_blocks_top_ba */
2005 if (!ni->backup_table_ba) {
2006 ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
2007 } else {
2008 if (main_table_write_count >= backup_table_write_count)
2009 ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
2010 else
2011 ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba;
2012 }
2013
2014 /* Set final mapping_blocks_ba */
2015 ni->mapping_blocks_ba = table_end_ba;
2016
2017 /* Set final data_block_count */
2018 for (i = ni->signature.mgmt_start_pb; i > 0; i--) {
2019 if (ni->block_mapping[i - 1] >= 0) {
2020 ni->data_block_count = i;
2021 break;
2022 }
2023 }
2024
2025 /* Debug purpose: mark mapped blocks and bad blocks */
2026 for (i = 0; i < ni->data_block_count; i++) {
2027 if (ni->block_mapping[i] > ni->mapping_blocks_top_ba)
2028 nmbm_mark_block_color_mapped(ni, ni->block_mapping[i]);
2029 }
2030
2031 for (i = 0; i < ni->block_count; i++) {
2032 if (nmbm_get_block_state(ni, i) == BLOCK_ST_BAD)
2033 nmbm_mark_block_color_bad(ni, i);
2034 }
2035
2036 /* Regenerate the info table cache from the final selected info table */
2037 nmbm_generate_info_table_cache(ni);
2038
developer49f853a2021-06-23 17:22:02 +08002039 if (ni->lower.flags & NMBM_F_READ_ONLY)
2040 return true;
2041
developer8d16ac22021-05-26 15:32:12 +08002042 /*
2043 * If only one table exists, try to write another table.
2044 * If the two tables have different write counts, try to update the info table
2045 */
2046 if (!ni->backup_table_ba) {
2047 success = nmbm_rescue_single_info_table(ni);
2048 } else if (main_table_write_count != backup_table_write_count) {
2049 /* Mark state & mapping tables changed */
2050 ni->block_state_changed = 1;
2051 ni->block_mapping_changed = 1;
2052
2053 success = nmbm_update_single_info_table(ni,
2054 main_table_write_count < backup_table_write_count);
2055 } else {
2056 success = true;
2057 }
2058
2059 /*
2060 * If there are no spare unmapped blocks, or still only one table
2061 * exists, set the chip to read-only
2062 */
2063 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
2064 nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n");
2065 ni->protected = 1;
2066 } else if (!success) {
2067 nlog_warn(ni, "Only one info table found. Device is now read-only\n");
2068 ni->protected = 1;
2069 }
2070
2071 return true;
2072}
2073
2074/*
2075 * nmbm_load_existing - Load NMBM from an existing chip
2076 * @ni: NMBM instance structure
2077 */
2078static bool nmbm_load_existing(struct nmbm_instance *ni)
2079{
2080 bool success;
2081
2082 /* Calculate the boundary of management blocks */
2083 ni->mgmt_start_ba = ni->signature.mgmt_start_pb;
2084
2085 nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n",
2086 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
2087 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
2088 ni->signature_ba - 1);
2089
2090 /* Look for info table(s) */
2091 success = nmbm_load_info_table(ni, ni->mgmt_start_ba,
2092 ni->signature_ba);
2093 if (success) {
developer49f853a2021-06-23 17:22:02 +08002094 nlog_info(ni, "NMBM has been successfully attached %s\n",
2095 (ni->lower.flags & NMBM_F_READ_ONLY) ? "in read-only mode" : "");
developer8d16ac22021-05-26 15:32:12 +08002096 return true;
2097 }
2098
2099 if (!(ni->lower.flags & NMBM_F_CREATE))
2100 return false;
2101
2102 /* Fill block state table & mapping table */
2103 nmbm_scan_badblocks(ni);
2104 nmbm_build_mapping_table(ni);
2105
developer49f853a2021-06-23 17:22:02 +08002106 if (ni->lower.flags & NMBM_F_READ_ONLY) {
2107 nlog_info(ni, "NMBM has been initialized in read-only mode\n");
2108 return true;
2109 }
2110
developer8d16ac22021-05-26 15:32:12 +08002111 /* Write info table(s) */
2112 success = nmbm_create_info_table(ni);
2113 if (success) {
2114 nlog_info(ni, "NMBM has been successfully created\n");
2115 return true;
2116 }
2117
2118 return false;
2119}
2120
2121/*
2122 * nmbm_find_signature - Find signature in the lower NAND chip
2123 * @ni: NMBM instance structure
2124 * @signature: buffer for storing the signature data found
2125 * @signature_ba: return the actual block address of the signature block
2126 *
2127 * Find a valid signature from a specific range in the lower NAND chip,
2128 * from bottom (highest address) to top (lowest address)
2129 *
2130 * Return true if found.
2131 */
2132static bool nmbm_find_signature(struct nmbm_instance *ni,
2133 struct nmbm_signature *signature,
2134 uint32_t *signature_ba)
2135{
2136 struct nmbm_signature sig;
2137 uint64_t off, addr;
2138 uint32_t block_count, ba, limit;
2139 bool success;
2140 int ret;
2141
2142 /* Calculate top and bottom block address */
2143 block_count = ni->lower.size >> ni->erasesize_shift;
2144 ba = block_count;
2145 limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->lower.max_ratio);
2146 if (ni->lower.max_reserved_blocks && block_count - limit > ni->lower.max_reserved_blocks)
2147 limit = block_count - ni->lower.max_reserved_blocks;
2148
2149 while (ba >= limit) {
2150 WATCHDOG_RESET();
2151
2152 ba--;
2153 addr = ba2addr(ni, ba);
2154
2155 if (nmbm_check_bad_phys_block(ni, ba))
2156 continue;
2157
2158 /* Check every page.
2159 * As long as at least one page contains a valid signature,
2160 * the block is treated as a valid signature block.
2161 */
2162 for (off = 0; off < ni->lower.erasesize;
2163 off += ni->lower.writesize) {
2164 WATCHDOG_RESET();
2165
2166 ret = nmbn_read_data(ni, addr + off, &sig,
2167 sizeof(sig));
2168 if (ret)
2169 continue;
2170
2171 /* Check for header size and checksum */
2172 success = nmbm_check_header(&sig, sizeof(sig));
2173 if (!success)
2174 continue;
2175
2176 /* Check for header magic */
2177 if (sig.header.magic == NMBM_MAGIC_SIGNATURE) {
2178 /* Found it */
2179 memcpy(signature, &sig, sizeof(sig));
2180 *signature_ba = ba;
2181 return true;
2182 }
2183 }
2184 }
2185
2186 return false;
2187}
2188
2189/*
2190 * is_power_of_2_u64 - Check whether a 64-bit integer is power of 2
2191 * @n: number to check
2192 */
2193static bool is_power_of_2_u64(uint64_t n)
2194{
2195 return (n != 0 && ((n & (n - 1)) == 0));
2196}
2197
2198/*
2199 * nmbm_check_lower_members - Validate the members of lower NAND device
2200 * @nld: Lower NAND chip structure
2201 */
2202static bool nmbm_check_lower_members(struct nmbm_lower_device *nld)
2203{
2204
2205 if (!nld->size || !is_power_of_2_u64(nld->size)) {
2206 nmbm_log_lower(nld, NMBM_LOG_ERR,
2207 "Chip size %llu is not valid\n", nld->size);
2208 return false;
2209 }
2210
2211 if (!nld->erasesize || !is_power_of_2(nld->erasesize)) {
2212 nmbm_log_lower(nld, NMBM_LOG_ERR,
2213 "Block size %u is not valid\n", nld->erasesize);
2214 return false;
2215 }
2216
2217 if (!nld->writesize || !is_power_of_2(nld->writesize)) {
2218 nmbm_log_lower(nld, NMBM_LOG_ERR,
2219 "Page size %u is not valid\n", nld->writesize);
2220 return false;
2221 }
2222
developer6e6a3d12022-05-09 15:17:32 +08002223 if (!nld->oobsize) {
developer8d16ac22021-05-26 15:32:12 +08002224 nmbm_log_lower(nld, NMBM_LOG_ERR,
2225 "Page spare size %u is not valid\n", nld->oobsize);
2226 return false;
2227 }
2228
developer49f853a2021-06-23 17:22:02 +08002229 if (!nld->read_page) {
2230 nmbm_log_lower(nld, NMBM_LOG_ERR, "read_page() is required\n");
2231 return false;
2232 }
2233
2234 if (!(nld->flags & NMBM_F_READ_ONLY) && (!nld->write_page || !nld->erase_block)) {
developer8d16ac22021-05-26 15:32:12 +08002235 nmbm_log_lower(nld, NMBM_LOG_ERR,
developer49f853a2021-06-23 17:22:02 +08002236 "write_page() and erase_block() are required\n");
developer8d16ac22021-05-26 15:32:12 +08002237 return false;
2238 }
2239
2240 /* Data sanity check */
2241 if (!nld->max_ratio)
2242 nld->max_ratio = 1;
2243
2244 if (nld->max_ratio >= NMBM_MGMT_DIV - 1) {
2245 nmbm_log_lower(nld, NMBM_LOG_ERR,
2246 "max ratio %u is invalid\n", nld->max_ratio);
2247 return false;
2248 }
2249
2250 if (nld->max_reserved_blocks && nld->max_reserved_blocks < NMBM_MGMT_BLOCKS_MIN) {
2251 nmbm_log_lower(nld, NMBM_LOG_ERR,
2252 "max reserved blocks %u is too small\n", nld->max_reserved_blocks);
2253 return false;
2254 }
2255
2256 return true;
2257}
2258
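/*
 * Illustrative sketch (an assumption, not part of this file): a minimal
 * nmbm_lower_device that passes the checks above. my_read_page(),
 * my_write_page() and my_erase_block() are hypothetical platform
 * callbacks; only fields validated by nmbm_check_lower_members() are
 * shown, and the geometry values are examples.
 *
 *	struct nmbm_lower_device nld = { 0 };
 *
 *	nld.size = 256ULL << 20;		// 256 MiB chip, power of 2
 *	nld.erasesize = 128 << 10;		// 128 KiB block, power of 2
 *	nld.writesize = 2048;			// 2 KiB page, power of 2
 *	nld.oobsize = 64;			// non-zero spare area
 *	nld.read_page = my_read_page;		// always required
 *	nld.write_page = my_write_page;		// required unless NMBM_F_READ_ONLY
 *	nld.erase_block = my_erase_block;	// required unless NMBM_F_READ_ONLY
 *	nld.flags = NMBM_F_CREATE;		// create tables if none are found
 *	nld.max_ratio = 1;			// at most 1/NMBM_MGMT_DIV reserved
 */
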
2259/*
2260 * nmbm_calc_structure_size - Calculate the instance structure size
2261 * @nld: NMBM lower device structure
2262 */
2263size_t nmbm_calc_structure_size(struct nmbm_lower_device *nld)
2264{
2265 uint32_t state_table_size, mapping_table_size, info_table_size;
2266 uint32_t block_count;
2267
2268 block_count = nmbm_lldiv(nld->size, nld->erasesize);
2269
2270 /* Calculate info table size */
2271 state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2272 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2273 mapping_table_size = block_count * sizeof(int32_t);
2274
2275 info_table_size = NMBM_ALIGN(sizeof(struct nmbm_info_table_header),
2276 nld->writesize);
2277 info_table_size += NMBM_ALIGN(state_table_size, nld->writesize);
2278 info_table_size += NMBM_ALIGN(mapping_table_size, nld->writesize);
2279
2280 return info_table_size + state_table_size + mapping_table_size +
2281 nld->writesize + nld->oobsize + sizeof(struct nmbm_instance);
2282}
2283
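/*
 * Illustrative sketch (an assumption, not part of this file): the caller
 * allocates a single buffer of nmbm_calc_structure_size() bytes and
 * passes it as the instance; nmbm_init_structure() then places the info
 * table cache, state table and mapping table right behind
 * struct nmbm_instance inside that same buffer.
 *
 *	size_t sz = nmbm_calc_structure_size(&nld);
 *	struct nmbm_instance *ni = malloc(sz);
 *	int ret;
 *
 *	if (!ni)
 *		return -ENOMEM;
 *	memset(ni, 0, sz);
 *
 *	ret = nmbm_attach(&nld, ni);
 *	if (ret)
 *		return ret;
 */
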
2284/*
2285 * nmbm_init_structure - Initialize members of instance structure
2286 * @ni: NMBM instance structure
2287 */
2288static void nmbm_init_structure(struct nmbm_instance *ni)
2289{
2290 uint32_t pages_per_block, blocks_per_chip;
2291 uintptr_t ptr;
2292
2293 pages_per_block = ni->lower.erasesize / ni->lower.writesize;
2294 blocks_per_chip = nmbm_lldiv(ni->lower.size, ni->lower.erasesize);
2295
2296 ni->rawpage_size = ni->lower.writesize + ni->lower.oobsize;
2297 ni->rawblock_size = pages_per_block * ni->rawpage_size;
2298 ni->rawchip_size = blocks_per_chip * ni->rawblock_size;
2299
2300 ni->writesize_mask = ni->lower.writesize - 1;
2301 ni->erasesize_mask = ni->lower.erasesize - 1;
2302
2303 ni->writesize_shift = ffs(ni->lower.writesize) - 1;
2304 ni->erasesize_shift = ffs(ni->lower.erasesize) - 1;
2305
2306 /* Calculate the number of blocks in this chip */
2307 ni->block_count = ni->lower.size >> ni->erasesize_shift;
2308
2309 /* Calculate info table size */
2310 ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2311 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2312 ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping);
2313
2314 ni->info_table_size = NMBM_ALIGN(sizeof(ni->info_table),
2315 ni->lower.writesize);
2316 ni->info_table.state_table_off = ni->info_table_size;
2317
2318 ni->info_table_size += NMBM_ALIGN(ni->state_table_size,
2319 ni->lower.writesize);
2320 ni->info_table.mapping_table_off = ni->info_table_size;
2321
2322 ni->info_table_size += NMBM_ALIGN(ni->mapping_table_size,
2323 ni->lower.writesize);
2324
2325 ni->info_table_spare_blocks = nmbm_get_spare_block_count(
2326 size2blk(ni, ni->info_table_size));
2327
2328 /* Assign memory to members */
2329 ptr = (uintptr_t)ni + sizeof(*ni);
2330
2331 ni->info_table_cache = (void *)ptr;
2332 ptr += ni->info_table_size;
2333
2334 ni->block_state = (void *)ptr;
2335 ptr += ni->state_table_size;
2336
2337 ni->block_mapping = (void *)ptr;
2338 ptr += ni->mapping_table_size;
2339
2340 ni->page_cache = (uint8_t *)ptr;
2341
2342 /* Initialize block state table */
2343 ni->block_state_changed = 0;
2344 memset(ni->block_state, 0xff, ni->state_table_size);
2345
2346 /* Initialize block mapping table */
2347 ni->block_mapping_changed = 0;
2348}
2349
2350/*
2351 * nmbm_attach - Attach to a lower device
2352 * @nld: NMBM lower device structure
2353 * @ni: NMBM instance structure
2354 */
2355int nmbm_attach(struct nmbm_lower_device *nld, struct nmbm_instance *ni)
2356{
2357 bool success;
2358
2359 if (!nld || !ni)
2360 return -EINVAL;
2361
2362 /* Set default log level */
2363 ni->log_display_level = NMBM_DEFAULT_LOG_LEVEL;
2364
2365 /* Check lower members */
2366 success = nmbm_check_lower_members(nld);
2367 if (!success)
2368 return -EINVAL;
2369
2370 /* Initialize NMBM instance */
2371 memcpy(&ni->lower, nld, sizeof(struct nmbm_lower_device));
2372 nmbm_init_structure(ni);
2373
2374 success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba);
2375 if (!success) {
2376 if (!(nld->flags & NMBM_F_CREATE)) {
2377 nlog_err(ni, "Signature not found\n");
2378 return -ENODEV;
2379 }
2380
2381 success = nmbm_create_new(ni);
2382 if (!success)
2383 return -ENODEV;
2384
2385 return 0;
2386 }
2387
2388 nlog_info(ni, "Signature found at block %u [0x%08llx]\n",
2389 ni->signature_ba, ba2addr(ni, ni->signature_ba));
2390 nmbm_mark_block_color_signature(ni, ni->signature_ba);
2391
2392 if (ni->signature.header.version != NMBM_VER) {
2393 nlog_err(ni, "NMBM version %u.%u is not supported\n",
2394 NMBM_VERSION_MAJOR_GET(ni->signature.header.version),
2395 NMBM_VERSION_MINOR_GET(ni->signature.header.version));
2396 return -EINVAL;
2397 }
2398
2399 if (ni->signature.nand_size != nld->size ||
2400 ni->signature.block_size != nld->erasesize ||
2401 ni->signature.page_size != nld->writesize ||
2402 ni->signature.spare_size != nld->oobsize) {
2403 nlog_err(ni, "NMBM configuration mismatch\n");
2404 return -EINVAL;
2405 }
2406
2407 success = nmbm_load_existing(ni);
2408 if (!success)
2409 return -ENODEV;
2410
2411 return 0;
2412}
2413
2414/*
2415 * nmbm_detach - Detach from a lower device, and save all tables
2416 * @ni: NMBM instance structure
2417 */
2418int nmbm_detach(struct nmbm_instance *ni)
2419{
2420 if (!ni)
2421 return -EINVAL;
2422
developer49f853a2021-06-23 17:22:02 +08002423 if (!(ni->lower.flags & NMBM_F_READ_ONLY))
2424 nmbm_update_info_table(ni);
developer8d16ac22021-05-26 15:32:12 +08002425
2426 nmbm_mark_block_color_normal(ni, 0, ni->block_count - 1);
2427
2428 return 0;
2429}
2430
2431/*
2432 * nmbm_erase_logic_block - Erase a logic block
2433 * @ni: NMBM instance structure
2434 * @block_addr: logic block address
2435 *
2436 * Logic block will be mapped to physical block before erasing.
2437 * A bad block found during erasing will be remapped to a good block if there is
2438 * still at least one good spare block available.
2439 */
2440static int nmbm_erase_logic_block(struct nmbm_instance *ni, uint32_t block_addr)
2441{
2442 uint32_t pb;
2443 bool success;
2444
2445retry:
2446 /* Map logic block to physical block */
2447 pb = ni->block_mapping[block_addr];
2448
2449 /* Whether the logic block is good (has valid mapping) */
2450 if ((int32_t)pb < 0) {
2451 nlog_debug(ni, "Logic block %u is a bad block\n", block_addr);
2452 return -EIO;
2453 }
2454
2455 /* Remap logic block if current physical block is a bad block */
2456 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD ||
2457 nmbm_get_block_state(ni, pb) == BLOCK_ST_NEED_REMAP)
2458 goto remap_logic_block;
developer4f9017d2021-06-16 17:18:47 +08002459
2460 /* Insurance to detect unexpected bad block marked by user */
2461 if (nmbm_check_bad_phys_block(ni, pb)) {
2462 nlog_warn(ni, "Found unexpected bad block possibly marked by user\n");
2463 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2464 goto remap_logic_block;
2465 }
developer8d16ac22021-05-26 15:32:12 +08002466
developer28a313b2021-06-16 17:23:34 +08002467 success = nmbm_erase_block_and_check(ni, pb);
developer8d16ac22021-05-26 15:32:12 +08002468 if (success)
2469 return 0;
2470
2471 /* Mark bad block */
2472 nmbm_mark_phys_bad_block(ni, pb);
2473 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2474
2475remap_logic_block:
2476 /* Try to assign a new block */
2477 success = nmbm_map_block(ni, block_addr);
2478 if (!success) {
2479 /* Mark logic block unusable, and update info table */
2480 ni->block_mapping[block_addr] = -1;
2481 if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2482 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2483 nmbm_update_info_table(ni);
2484 return -EIO;
2485 }
2486
2487 /* Update info table before erasing */
2488 if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2489 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2490 nmbm_update_info_table(ni);
2491
2492 goto retry;
2493}
2494
2495/*
2496 * nmbm_erase_block_range - Erase logic blocks
2497 * @ni: NMBM instance structure
2498 * @addr: logic linear address
2499 * @size: erase range
2500 * @failed_addr: return failed block address if error occurs
2501 */
2502int nmbm_erase_block_range(struct nmbm_instance *ni, uint64_t addr,
2503 uint64_t size, uint64_t *failed_addr)
2504{
2505 uint32_t start_ba, end_ba;
2506 int ret;
2507
2508 if (!ni)
2509 return -EINVAL;
2510
2511 /* Sanity check */
developer49f853a2021-06-23 17:22:02 +08002512 if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
developer8d16ac22021-05-26 15:32:12 +08002513 nlog_debug(ni, "Device is forced read-only\n");
2514 return -EROFS;
2515 }
2516
2517 if (addr >= ba2addr(ni, ni->data_block_count)) {
2518 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2519 return -EINVAL;
2520 }
2521
2522 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2523 nlog_err(ni, "Erase range 0xllxu is too large\n", size);
2524 return -EINVAL;
2525 }
2526
2527 if (!size) {
2528 nlog_warn(ni, "No blocks to be erased\n");
2529 return 0;
2530 }
2531
2532 start_ba = addr2ba(ni, addr);
2533 end_ba = addr2ba(ni, addr + size - 1);
2534
2535 while (start_ba <= end_ba) {
2536 WATCHDOG_RESET();
2537
2538 ret = nmbm_erase_logic_block(ni, start_ba);
2539 if (ret) {
2540 if (failed_addr)
2541 *failed_addr = ba2addr(ni, start_ba);
2542 return ret;
2543 }
2544
2545 start_ba++;
2546 }
2547
2548 return 0;
2549}
2550
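/*
 * Illustrative usage sketch (an assumption, not part of this file):
 * every logic block touched by [addr, addr + size) is erased, and
 * failed_addr reports the logic address of the block that failed.
 * ni and nld are the attached instance and lower device from the
 * earlier sketches.
 *
 *	uint64_t failed_addr;
 *	int ret;
 *
 *	ret = nmbm_erase_block_range(ni, 0, nld.erasesize, &failed_addr);
 *	if (ret)
 *		printf("erase failed at 0x%llx (%d)\n",
 *		       (unsigned long long)failed_addr, ret);
 */
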
2551/*
2552 * nmbm_read_logic_page - Read page based on logic address
2553 * @ni: NMBM instance structure
2554 * @addr: logic linear address
2555 * @data: buffer to store main data. optional.
2556 * @oob: buffer to store oob data. optional.
2557 * @mode: read mode
developerd1457c92021-06-16 17:23:18 +08002558 *
2559 * Return 0 for success, positive value for corrected bitflip count,
2560 * -EBADMSG for ecc error, other negative values for other errors
developer8d16ac22021-05-26 15:32:12 +08002561 */
2562static int nmbm_read_logic_page(struct nmbm_instance *ni, uint64_t addr,
2563 void *data, void *oob, enum nmbm_oob_mode mode)
2564{
2565 uint32_t lb, pb, offset;
2566 uint64_t paddr;
developer8d16ac22021-05-26 15:32:12 +08002567
2568 /* Extract block address and in-block offset */
2569 lb = addr2ba(ni, addr);
2570 offset = addr & ni->erasesize_mask;
2571
2572 /* Map logic block to physical block */
2573 pb = ni->block_mapping[lb];
2574
2575 /* Whether the logic block is good (has valid mapping) */
2576 if ((int32_t)pb < 0) {
2577 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2578 return -EIO;
2579 }
2580
2581 /* Fail if physical block is marked bad */
2582 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2583 return -EIO;
2584
2585 /* Assemble new address */
2586 paddr = ba2addr(ni, pb) + offset;
2587
developer55097772021-06-16 17:23:50 +08002588 return nmbm_read_phys_page(ni, paddr, data, oob, mode);
developer8d16ac22021-05-26 15:32:12 +08002589}
2590
2591/*
2592 * nmbm_read_single_page - Read one page based on logic address
2593 * @ni: NMBM instance structure
2594 * @addr: logic linear address
2595 * @data: buffer to store main data. optional.
2596 * @oob: buffer to store oob data. optional.
2597 * @mode: read mode
developerd1457c92021-06-16 17:23:18 +08002598 *
2599 * Return 0 for success, positive value for corrected bitflip count,
2600 * -EBADMSG for ecc error, other negative values for other errors
developer8d16ac22021-05-26 15:32:12 +08002601 */
2602int nmbm_read_single_page(struct nmbm_instance *ni, uint64_t addr, void *data,
2603 void *oob, enum nmbm_oob_mode mode)
2604{
2605 if (!ni)
2606 return -EINVAL;
2607
2608 /* Sanity check */
2609 if (ni->protected) {
2610 nlog_debug(ni, "Device is forced read-only\n");
2611 return -EROFS;
2612 }
2613
2614 if (addr >= ba2addr(ni, ni->data_block_count)) {
2615 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2616 return -EINVAL;
2617 }
2618
2619 return nmbm_read_logic_page(ni, addr, data, oob, mode);
2620}
2621
2622/*
2623 * nmbm_read_range - Read data without oob
2624 * @ni: NMBM instance structure
2625 * @addr: logic linear address
2626 * @size: data size to read
2627 * @data: buffer to store main data to be read
2628 * @mode: read mode
2629 * @retlen: return actual data size read
developerd1457c92021-06-16 17:23:18 +08002630 *
2631 * Return 0 for success, positive value for corrected bitflip count,
2632 * -EBADMSG for ecc error, other negative values for other errors
developer8d16ac22021-05-26 15:32:12 +08002633 */
2634int nmbm_read_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2635 void *data, enum nmbm_oob_mode mode, size_t *retlen)
2636{
2637 uint64_t off = addr;
2638 uint8_t *ptr = data;
2639 size_t sizeremain = size, chunksize, leading;
developerd1457c92021-06-16 17:23:18 +08002640 bool has_ecc_err = false;
2641 int ret, max_bitflips = 0;
developer8d16ac22021-05-26 15:32:12 +08002642
2643 if (!ni)
2644 return -EINVAL;
2645
2646 /* Sanity check */
2647 if (ni->protected) {
2648 nlog_debug(ni, "Device is forced read-only\n");
2649 return -EROFS;
2650 }
2651
2652 if (addr >= ba2addr(ni, ni->data_block_count)) {
2653 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2654 return -EINVAL;
2655 }
2656
2657 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2658 nlog_err(ni, "Read range 0x%llx is too large\n", size);
2659 return -EINVAL;
2660 }
2661
2662 if (!size) {
2663 nlog_warn(ni, "No data to be read\n");
2664 return 0;
2665 }
2666
2667 while (sizeremain) {
2668 WATCHDOG_RESET();
2669
2670 leading = off & ni->writesize_mask;
2671 chunksize = ni->lower.writesize - leading;
2672 if (chunksize > sizeremain)
2673 chunksize = sizeremain;
2674
2675 if (chunksize == ni->lower.writesize) {
2676 ret = nmbm_read_logic_page(ni, off - leading, ptr,
2677 NULL, mode);
developerd1457c92021-06-16 17:23:18 +08002678 if (ret < 0 && ret != -EBADMSG)
developer8d16ac22021-05-26 15:32:12 +08002679 break;
2680 } else {
2681 ret = nmbm_read_logic_page(ni, off - leading,
2682 ni->page_cache, NULL,
2683 mode);
developerd1457c92021-06-16 17:23:18 +08002684 if (ret < 0 && ret != -EBADMSG)
developer8d16ac22021-05-26 15:32:12 +08002685 break;
2686
2687 memcpy(ptr, ni->page_cache + leading, chunksize);
2688 }
2689
developerd1457c92021-06-16 17:23:18 +08002690 if (ret == -EBADMSG)
2691 has_ecc_err = true;
2692
2693 if (ret > max_bitflips)
2694 max_bitflips = ret;
2695
developer8d16ac22021-05-26 15:32:12 +08002696 off += chunksize;
2697 ptr += chunksize;
2698 sizeremain -= chunksize;
2699 }
2700
2701 if (retlen)
2702 *retlen = size - sizeremain;
2703
developerd1457c92021-06-16 17:23:18 +08002704 if (ret < 0 && ret != -EBADMSG)
2705 return ret;
2706
2707 if (has_ecc_err)
2708 return -EBADMSG;
2709
2710 return max_bitflips;
developer8d16ac22021-05-26 15:32:12 +08002711}
2712
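/*
 * Illustrative usage sketch (an assumption, not part of this file),
 * following the return convention documented above: a negative value
 * other than -EBADMSG is a hard error, -EBADMSG means an uncorrectable
 * ECC error somewhere in the range, and a positive value is the largest
 * corrected bitflip count seen in any page. addr, len and buf are
 * caller-supplied, and mode is one of enum nmbm_oob_mode.
 *
 *	size_t retlen;
 *	int ret;
 *
 *	ret = nmbm_read_range(ni, addr, len, buf, mode, &retlen);
 *	if (ret == -EBADMSG)
 *		printf("uncorrectable data within the range\n");
 *	else if (ret < 0)
 *		printf("read error %d after %zu bytes\n", ret, retlen);
 *	else if (ret > 0)
 *		printf("up to %d bitflips corrected per page\n", ret);
 */
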
2713/*
2714 * nmbm_write_logic_page - Write page based on logic address
2715 * @ni: NMBM instance structure
2716 * @addr: logic linear address
2717 * @data: buffer contains main data. optional.
2718 * @oob: buffer contains oob data. optional.
2719 * @mode: write mode
2720 */
2721static int nmbm_write_logic_page(struct nmbm_instance *ni, uint64_t addr,
2722 const void *data, const void *oob,
2723 enum nmbm_oob_mode mode)
2724{
2725 uint32_t lb, pb, offset;
2726 uint64_t paddr;
2727 bool success;
2728
2729 /* Extract block address and in-block offset */
2730 lb = addr2ba(ni, addr);
2731 offset = addr & ni->erasesize_mask;
2732
2733 /* Map logic block to physical block */
2734 pb = ni->block_mapping[lb];
2735
2736 /* Whether the logic block is good (has valid mapping) */
2737 if ((int32_t)pb < 0) {
2738 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2739 return -EIO;
2740 }
2741
2742 /* Fail if physical block is marked bad */
2743 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2744 return -EIO;
2745
2746 /* Assemble new address */
2747 paddr = ba2addr(ni, pb) + offset;
2748
2749 success = nmbm_write_phys_page(ni, paddr, data, oob, mode);
2750 if (success)
2751 return 0;
2752
2753 /*
2754 * Do not remap bad block here. Just mark this block in state table.
2755 * Remap this block on erasing.
2756 */
2757 nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2758 nmbm_update_info_table(ni);
2759
2760 return -EIO;
2761}
2762
2763/*
developerd8cf71c2023-06-20 19:10:04 +08002764 * nmbm_panic_write_logic_page - Panic write page based on logic address
2765 * @ni: NMBM instance structure
2766 * @addr: logic linear address
2767 * @data: buffer contains main data. optional.
2768 */
2769static int nmbm_panic_write_logic_page(struct nmbm_instance *ni, uint64_t addr,
2770 const void *data)
2771{
2772 uint32_t lb, pb, offset;
2773 uint64_t paddr;
2774 bool success;
2775
2776 if (!ni->lower.panic_write_page)
2777 return -ENOTSUPP;
2778
2779 /* Extract block address and in-block offset */
2780 lb = addr2ba(ni, addr);
2781 offset = addr & ni->erasesize_mask;
2782
2783 /* Map logic block to physical block */
2784 pb = ni->block_mapping[lb];
2785
2786 /* Whether the logic block is good (has valid mapping) */
2787 if ((int32_t)pb < 0) {
2788 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2789 return -EIO;
2790 }
2791
2792 /* Fail if physical block is marked bad */
2793 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2794 return -EIO;
2795
2796 /* Assemble new address */
2797 paddr = ba2addr(ni, pb) + offset;
2798
2799 success = nmbm_panic_write_phys_page(ni, paddr, data);
2800 if (success)
2801 return 0;
2802
2803 /*
2804 * Do not remap bad block here. Just mark this block in state table.
2805 * Remap this block on erasing.
2806 */
2807 nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2808 nmbm_update_info_table(ni);
2809
2810 return -EIO;
2811}
2812
2813/*
developer8d16ac22021-05-26 15:32:12 +08002814 * nmbm_write_single_page - Write one page based on logic address
2815 * @ni: NMBM instance structure
2816 * @addr: logic linear address
2817 * @data: buffer contains main data. optional.
2818 * @oob: buffer contains oob data. optional.
2819 * @mode: write mode
2820 */
2821int nmbm_write_single_page(struct nmbm_instance *ni, uint64_t addr,
2822 const void *data, const void *oob,
2823 enum nmbm_oob_mode mode)
2824{
2825 if (!ni)
2826 return -EINVAL;
2827
2828 /* Sanity check */
developer49f853a2021-06-23 17:22:02 +08002829 if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
developer8d16ac22021-05-26 15:32:12 +08002830 nlog_debug(ni, "Device is forced read-only\n");
2831 return -EROFS;
2832 }
2833
2834 if (addr >= ba2addr(ni, ni->data_block_count)) {
2835 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2836 return -EINVAL;
2837 }
2838
2839 return nmbm_write_logic_page(ni, addr, data, oob, mode);
2840}
2841
2842/*
developerd8cf71c2023-06-20 19:10:04 +08002843 * nmbm_panic_write_single_page - Panic write one page based on logic address
2844 * @ni: NMBM instance structure
2845 * @addr: logic linear address
2846 * @data: buffer contains main data. optional.
2847 */
2848int nmbm_panic_write_single_page(struct nmbm_instance *ni, uint64_t addr,
2849 const void *data)
2850{
2851 if (!ni)
2852 return -EINVAL;
2853
2854 /* Sanity check */
2855 if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
2856 nlog_debug(ni, "Device is forced read-only\n");
2857 return -EROFS;
2858 }
2859
2860 if (addr >= ba2addr(ni, ni->data_block_count)) {
2861 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2862 return -EINVAL;
2863 }
2864
2865 return nmbm_panic_write_logic_page(ni, addr, data);
2866}
2867
2868/*
developer8d16ac22021-05-26 15:32:12 +08002869 * nmbm_write_range - Write data without oob
2870 * @ni: NMBM instance structure
2871 * @addr: logic linear address
2872 * @size: data size to write
2873 * @data: buffer contains data to be written
2874 * @mode: write mode
2875 * @retlen: return actual data size written
2876 */
2877int nmbm_write_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2878 const void *data, enum nmbm_oob_mode mode,
2879 size_t *retlen)
2880{
2881 uint64_t off = addr;
2882 const uint8_t *ptr = data;
2883 size_t sizeremain = size, chunksize, leading;
2884 int ret;
2885
2886 if (!ni)
2887 return -EINVAL;
2888
2889 /* Sanity check */
developer49f853a2021-06-23 17:22:02 +08002890 if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
developer8d16ac22021-05-26 15:32:12 +08002891 nlog_debug(ni, "Device is forced read-only\n");
2892 return -EROFS;
2893 }
2894
2895 if (addr >= ba2addr(ni, ni->data_block_count)) {
2896 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2897 return -EINVAL;
2898 }
2899
2900 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2901 nlog_err(ni, "Write size 0x%zx is too large\n", size);
2902 return -EINVAL;
2903 }
2904
2905 if (!size) {
2906 nlog_warn(ni, "No data to be written\n");
2907 return 0;
2908 }
2909
2910 while (sizeremain) {
2911 WATCHDOG_RESET();
2912
2913 leading = off & ni->writesize_mask;
2914 chunksize = ni->lower.writesize - leading;
2915 if (chunksize > sizeremain)
2916 chunksize = sizeremain;
2917
2918 if (chunksize == ni->lower.writesize) {
2919 ret = nmbm_write_logic_page(ni, off - leading, ptr,
2920 NULL, mode);
2921 if (ret)
2922 break;
2923 } else {
2924 memset(ni->page_cache, 0xff, ni->lower.writesize);
2925 memcpy(ni->page_cache + leading, ptr, chunksize);
2926
2927 ret = nmbm_write_logic_page(ni, off - leading,
2928 ni->page_cache, NULL,
2929 mode);
2930 if (ret)
2931 break;
2932 }
2933
2934 off += chunksize;
2935 ptr += chunksize;
2936 sizeremain -= chunksize;
2937 }
2938
2939 if (retlen)
2940 *retlen = size - sizeremain;
2941
2942 return ret;
2943}
2944
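/*
 * Illustrative usage sketch (an assumption, not part of this file):
 * writes that do not start or end on a page boundary are merged into
 * the page cache (padded with 0xff) before being programmed, so the
 * caller only needs to check the return value and the length written.
 * addr, len, buf and mode are caller-supplied as in the read sketch.
 *
 *	size_t retlen;
 *	int ret;
 *
 *	ret = nmbm_write_range(ni, addr, len, buf, mode, &retlen);
 *	if (ret)
 *		printf("write failed (%d) after %zu bytes\n", ret, retlen);
 */
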
2945/*
2946 * nmbm_check_bad_block - Check whether a logic block is usable
2947 * @ni: NMBM instance structure
2948 * @addr: logic linear address
2949 */
2950int nmbm_check_bad_block(struct nmbm_instance *ni, uint64_t addr)
2951{
2952 uint32_t lb, pb;
2953
2954 if (!ni)
2955 return -EINVAL;
2956
2957 if (addr >= ba2addr(ni, ni->data_block_count)) {
2958 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2959 return -EINVAL;
2960 }
2961
2962 lb = addr2ba(ni, addr);
2963
2964 /* Map logic block to physical block */
2965 pb = ni->block_mapping[lb];
2966
2967 if ((int32_t)pb < 0)
2968 return 1;
2969
2970 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2971 return 1;
2972
2973 return 0;
2974}
2975
2976/*
2977 * nmbm_mark_bad_block - Mark a logic block unusable
2978 * @ni: NMBM instance structure
2979 * @addr: logic linear address
2980 */
2981int nmbm_mark_bad_block(struct nmbm_instance *ni, uint64_t addr)
2982{
2983 uint32_t lb, pb;
2984
2985 if (!ni)
2986 return -EINVAL;
2987
developer49f853a2021-06-23 17:22:02 +08002988 /* Sanity check */
2989 if (ni->protected || (ni->lower.flags & NMBM_F_READ_ONLY)) {
2990 nlog_debug(ni, "Device is forced read-only\n");
2991 return -EROFS;
2992 }
2993
developer8d16ac22021-05-26 15:32:12 +08002994 if (addr >= ba2addr(ni, ni->data_block_count)) {
2995 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2996 return -EINVAL;
2997 }
2998
2999 lb = addr2ba(ni, addr);
3000
3001 /* Map logic block to physical block */
3002 pb = ni->block_mapping[lb];
3003
3004 if ((int32_t)pb < 0)
3005 return 0;
3006
3007 ni->block_mapping[lb] = -1;
3008 nmbm_mark_phys_bad_block(ni, pb);
3009 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
3010 nmbm_update_info_table(ni);
3011
3012 return 0;
3013}
3014
3015/*
3016 * nmbm_get_avail_size - Get available user data size
3017 * @ni: NMBM instance structure
3018 */
3019uint64_t nmbm_get_avail_size(struct nmbm_instance *ni)
3020{
3021 if (!ni)
3022 return 0;
3023
3024 return (uint64_t)ni->data_block_count << ni->erasesize_shift;
3025}
3026
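/*
 * Illustrative usage sketch (an assumption, not part of this file):
 * scanning the logical space for unusable blocks, using the available
 * size reported above as the bound. nld.erasesize is the block size of
 * the lower device from the earlier sketch.
 *
 *	uint64_t off;
 *
 *	for (off = 0; off < nmbm_get_avail_size(ni); off += nld.erasesize)
 *		if (nmbm_check_bad_block(ni, off))
 *			printf("logic block at 0x%llx is unusable\n",
 *			       (unsigned long long)off);
 */
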
3027/*
3028 * nmbm_get_lower_device - Get lower device structure
3029 * @ni: NMBM instance structure
3030 * @nld: pointer to hold the data of lower device structure
3031 */
3032int nmbm_get_lower_device(struct nmbm_instance *ni, struct nmbm_lower_device *nld)
3033{
3034 if (!ni)
3035 return -EINVAL;
3036
3037 if (nld)
3038 memcpy(nld, &ni->lower, sizeof(*nld));
3039
3040 return 0;
3041}
3042
3043#include "nmbm-debug.inl"