1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2021 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 */
7
8#include "nmbm-private.h"
9
10#include "nmbm-debug.h"
11
12#define NMBM_VER_MAJOR 1
13#define NMBM_VER_MINOR 0
14#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
15 NMBM_VER_MINOR)
16
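/* Round @v up to the next multiple of @a; @a must be a power of two */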
17#define NMBM_ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))
18
19/*****************************************************************************/
20/* Logging related functions */
21/*****************************************************************************/
22
23/*
24 * nmbm_log_lower - Print log using OS specific routine
25 * @nld: NMBM lower device structure
26 * @level: log level
27 * @fmt: format string
28 */
29static void nmbm_log_lower(struct nmbm_lower_device *nld,
30 enum nmbm_log_category level, const char *fmt, ...)
31{
32 va_list ap;
33
34 if (!nld->logprint)
35 return;
36
37 va_start(ap, fmt);
38 nld->logprint(nld->arg, level, fmt, ap);
39 va_end(ap);
40}
41
42/*
43 * nmbm_log - Print log using OS specific routine
44 * @ni: NMBM instance structure
45 * @level: log level
46 * @fmt: format string
47 */
48static void nmbm_log(struct nmbm_instance *ni, enum nmbm_log_category level,
49 const char *fmt, ...)
50{
51 va_list ap;
52
53 if (!ni)
54 return;
55
56 if (!ni->lower.logprint || level < ni->log_display_level)
57 return;
58
59 va_start(ap, fmt);
60 ni->lower.logprint(ni->lower.arg, level, fmt, ap);
61 va_end(ap);
62}
63
64/*
65 * nmbm_set_log_level - Set log display level
66 * @ni: NMBM instance structure
67 * @level: log display level
68 */
69enum nmbm_log_category nmbm_set_log_level(struct nmbm_instance *ni,
70 enum nmbm_log_category level)
71{
72 enum nmbm_log_category old;
73
74 if (!ni)
75 return __NMBM_LOG_MAX;
76
77 old = ni->log_display_level;
78 ni->log_display_level = level;
79 return old;
80}
81
82/*
83 * nlog_table_creation - Print log of table creation event
84 * @ni: NMBM instance structure
85 * @main_table: whether the table is main info table
86 * @start_ba: start block address of the table
87 * @end_ba: block address after the end of the table
88 */
89static void nlog_table_creation(struct nmbm_instance *ni, bool main_table,
90 uint32_t start_ba, uint32_t end_ba)
91{
92 if (start_ba == end_ba - 1)
93 nlog_info(ni, "%s info table has been written to block %u\n",
94 main_table ? "Main" : "Backup", start_ba);
95 else
96 nlog_info(ni, "%s info table has been written to block %u-%u\n",
97 main_table ? "Main" : "Backup", start_ba, end_ba - 1);
98
99 nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
100}
101
102/*
103 * nlog_table_update - Print log of table update event
104 * @ni: NMBM instance structure
105 * @main_table: whether the table is main info table
106 * @start_ba: start block address of the table
107 * @end_ba: block address after the end of the table
108 */
109static void nlog_table_update(struct nmbm_instance *ni, bool main_table,
110 uint32_t start_ba, uint32_t end_ba)
111{
112 if (start_ba == end_ba - 1)
113 nlog_debug(ni, "%s info table has been updated in block %u\n",
114 main_table ? "Main" : "Backup", start_ba);
115 else
116 nlog_debug(ni, "%s info table has been updated in block %u-%u\n",
117 main_table ? "Main" : "Backup", start_ba, end_ba - 1);
118
119 nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
120}
121
122/*
123 * nlog_table_found - Print log of table found event
124 * @ni: NMBM instance structure
125 * @first_table: whether the table is first found info table
126 * @write_count: write count of the info table
127 * @start_ba: start block address of the table
128 * @end_ba: block address after the end of the table
129 */
130static void nlog_table_found(struct nmbm_instance *ni, bool first_table,
131 uint32_t write_count, uint32_t start_ba,
132 uint32_t end_ba)
133{
134 if (start_ba == end_ba - 1)
135 nlog_info(ni, "%s info table with writecount %u found in block %u\n",
136 first_table ? "First" : "Second", write_count,
137 start_ba);
138 else
139 nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n",
140 first_table ? "First" : "Second", write_count,
141 start_ba, end_ba - 1);
142
143 nmbm_mark_block_color_info_table(ni, start_ba, end_ba - 1);
144}
145
146/*****************************************************************************/
147/* Address conversion functions */
148/*****************************************************************************/
149
150/*
151 * addr2ba - Convert a linear address to block address
152 * @ni: NMBM instance structure
153 * @addr: Linear address
154 */
155static uint32_t addr2ba(struct nmbm_instance *ni, uint64_t addr)
156{
157 return addr >> ni->erasesize_shift;
158}
159
160/*
161 * ba2addr - Convert a block address to linear address
162 * @ni: NMBM instance structure
163 * @ba: Block address
164 */
165static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba)
166{
167 return (uint64_t)ba << ni->erasesize_shift;
168}
169/*
170 * size2blk - Get minimum required blocks for storing specific size of data
171 * @ni: NMBM instance structure
172 * @size: size for storing
173 */
174static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size)
175{
176 return (size + ni->lower.erasesize - 1) >> ni->erasesize_shift;
177}
178
179/*****************************************************************************/
180/* High level NAND chip APIs */
181/*****************************************************************************/
182
183/*
184 * nmbm_reset_chip - Reset NAND device
185 * @nld: Lower NAND chip structure
186 */
187static void nmbm_reset_chip(struct nmbm_instance *ni)
188{
189 if (ni->lower.reset_chip)
190 ni->lower.reset_chip(ni->lower.arg);
191}
192
193/*
194 * nmbm_read_phys_page - Read page with retry
195 * @ni: NMBM instance structure
196 * @addr: linear address where the data will be read from
197 * @data: the main data to be read
198 * @oob: the oob data to be read
199 * @mode: mode for processing oob data
200 *
201 * Read a page for at most NMBM_TRY_COUNT times.
202 *
203 * Return 0 for success, positive value for corrected bitflip count,
204 * -EBADMSG for ECC error, other negative values for other errors
205 */
206static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr,
207 void *data, void *oob, enum nmbm_oob_mode mode)
208{
209 int tries, ret;
210
211 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
212 ret = ni->lower.read_page(ni->lower.arg, addr, data, oob, mode);
213 if (ret >= 0)
214 return ret;
215
216 nmbm_reset_chip(ni);
217 }
218
219 if (ret != -EBADMSG)
220 nlog_err(ni, "Page read failed at address 0x%08llx\n", addr);
221
222 return ret;
223}
224
225/*
226 * nmbm_write_phys_page - Write page with retry
227 * @ni: NMBM instance structure
228 * @addr: linear address where the data will be written to
229 * @data: the main data to be written
230 * @oob: the oob data to be written
231 * @mode: mode for processing oob data
232 *
233 * Write a page for at most NMBM_TRY_COUNT times.
234 */
235static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
236 const void *data, const void *oob,
237 enum nmbm_oob_mode mode)
238{
239 int tries, ret;
240
241 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
242 ret = ni->lower.write_page(ni->lower.arg, addr, data, oob, mode);
243 if (!ret)
244 return true;
245
246 nmbm_reset_chip(ni);
247 }
248
249 nlog_err(ni, "Page write failed at address 0x%08llx\n", addr);
250
251 return false;
252}
253
254/*
255 * nmbm_erase_phys_block - Erase a block with retry
256 * @ni: NMBM instance structure
257 * @addr: Linear address
258 *
259 * Erase a block for at most NMBM_TRY_COUNT times.
260 */
261static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr)
262{
263 int tries, ret;
264
265 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
266 ret = ni->lower.erase_block(ni->lower.arg, addr);
267 if (!ret)
268 return true;
269
270 nmbm_reset_chip(ni);
271 }
272
273 nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr);
274
275 return false;
276}
277
278/*
279 * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
280 * @ni: NMBM instance structure
281 * @ba: block address
282 */
283static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba)
284{
285 uint64_t addr = ba2addr(ni, ba);
286 int ret;
287
288 if (ni->lower.is_bad_block)
289 return ni->lower.is_bad_block(ni->lower.arg, addr);
290
291 /* Treat ECC error as read success */
292 ret = nmbm_read_phys_page(ni, addr, NULL,
293 ni->page_cache + ni->lower.writesize,
294 NMBM_MODE_RAW);
295 if (ret < 0 && ret != -EBADMSG)
296 return true;
297
298 return ni->page_cache[ni->lower.writesize] != 0xff;
299}
300
301/*
302 * nmbm_mark_phys_bad_block - Mark a block bad
303 * @ni: NMBM instance structure
304 * @ba: block address
305 */
306static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba)
307{
308 uint64_t addr = ba2addr(ni, ba);
309 enum nmbm_log_category level;
310 uint32_t off;
311
312 nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr);
313
314 if (ni->lower.mark_bad_block)
315 return ni->lower.mark_bad_block(ni->lower.arg, addr);
316
317 /* Whole page set to 0x00 */
318 memset(ni->page_cache, 0, ni->rawpage_size);
319
320 /* Write to all pages within this block, disable all errors */
321 level = nmbm_set_log_level(ni, __NMBM_LOG_MAX);
322
323 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
324 nmbm_write_phys_page(ni, addr + off, ni->page_cache,
325 ni->page_cache + ni->lower.writesize,
326 NMBM_MODE_RAW);
327 }
328
329 nmbm_set_log_level(ni, level);
330
331 return 0;
332}
333
334/*****************************************************************************/
335/* NMBM related functions */
336/*****************************************************************************/
337
338/*
339 * nmbm_check_header - Check whether an NMBM structure is valid
340 * @data: pointer to an NMBM structure with an NMBM header at the beginning
341 * @size: size of the buffer pointed to by @data
342 *
343 * The size of the NMBM structure may be larger than the NMBM header,
344 * e.g. when it includes the block mapping table and block state table.
345 */
346static bool nmbm_check_header(const void *data, uint32_t size)
347{
348 const struct nmbm_header *header = data;
349 struct nmbm_header nhdr;
350 uint32_t new_checksum;
351
352 /*
353 * Make sure expected structure size is equal or smaller than
354 * buffer size.
355 */
356 if (header->size > size)
357 return false;
358
359 memcpy(&nhdr, data, sizeof(nhdr));
360
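/* Recompute the CRC32 with the checksum field zeroed, covering the header plus any trailing table data */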
361 nhdr.checksum = 0;
362 new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr));
363 if (header->size > sizeof(nhdr))
364 new_checksum = nmbm_crc32(new_checksum,
365 (const uint8_t *)data + sizeof(nhdr),
366 header->size - sizeof(nhdr));
367
368 if (header->checksum != new_checksum)
369 return false;
370
371 return true;
372}
373
374/*
375 * nmbm_update_checksum - Update checksum of a NMBM structure
376 * @header: pointer to a NMBM structure with a NMBM header at beginning
377 *
378 * The size of the NMBM structure must be specified by @header->size
379 */
380static void nmbm_update_checksum(struct nmbm_header *header)
381{
382 header->checksum = 0;
383 header->checksum = nmbm_crc32(0, header, header->size);
384}
385
386/*
387 * nmbm_get_spare_block_count - Calculate the number of blocks to be reserved
388 * @block_count: number of data blocks
389 *
390 * Calculate the number of spare blocks that should be reserved for the data blocks.
391 */
392static uint32_t nmbm_get_spare_block_count(uint32_t block_count)
393{
394 uint32_t val;
395
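/* Reserve NMBM_SPARE_BLOCK_MULTI spare blocks per NMBM_SPARE_BLOCK_DIV data blocks (rounded to the nearest), with a minimum of NMBM_SPARE_BLOCK_MIN */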
396 val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV;
397 val *= NMBM_SPARE_BLOCK_MULTI;
398
399 if (val < NMBM_SPARE_BLOCK_MIN)
400 val = NMBM_SPARE_BLOCK_MIN;
401
402 return val;
403}
404
405/*
406 * nmbm_get_block_state_raw - Get state of a block from raw block state table
407 * @block_state: pointer to raw block state table (bitmap)
408 * @ba: block address
409 */
410static uint32_t nmbm_get_block_state_raw(nmbm_bitmap_t *block_state,
411 uint32_t ba)
412{
413 uint32_t unit, shift;
414
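/* Each block state occupies NMBM_BITMAP_BITS_PER_BLOCK bits within one bitmap unit */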
415 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
416 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
417
418 return (block_state[unit] >> shift) & BLOCK_ST_MASK;
419}
420
421/*
422 * nmbm_get_block_state - Get state of a block from block state table
423 * @ni: NMBM instance structure
424 * @ba: block address
425 */
426static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba)
427{
428 return nmbm_get_block_state_raw(ni->block_state, ba);
429}
430
431/*
432 * nmbm_set_block_state - Set the state of a block in the block state table
433 * @ni: NMBM instance structure
434 * @ba: block address
435 * @state: block state
436 *
437 * Set the state of a block. If the block state changed, ni->block_state_changed
438 * will be incremented.
439 */
440static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba,
441 uint32_t state)
442{
443 uint32_t unit, shift, orig;
444 nmbm_bitmap_t uv;
445
446 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
447 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
448
449 orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK;
450 state &= BLOCK_ST_MASK;
451
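/* Clear the old state bits of this block, then merge in the new state */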
452 uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift));
453 uv |= state << shift;
454 ni->block_state[unit] = uv;
455
456 if (state == BLOCK_ST_BAD)
457 nmbm_mark_block_color_bad(ni, ba);
458
459 if (orig != state) {
460 ni->block_state_changed++;
461 return true;
462 }
463
464 return false;
465}
466
467/*
468 * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
469 * @ni: NMBM instance structure
470 * @ba: start physical block address
471 * @nba: return physical block address after walk
472 * @count: number of good blocks to be skipped
473 * @limit: highest block address allowed for walking
474 *
475 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
476 * return the next good block address.
477 *
478 * If not enough good blocks are counted before @limit is reached, false will be returned.
479 *
480 * If @count == 0, the nearest good block address will be returned.
481 * @limit itself is not included in the walk.
482 */
483static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba,
484 uint32_t *nba, uint32_t count,
485 uint32_t limit)
486{
487 int32_t nblock = count;
488
489 if (limit >= ni->block_count)
490 limit = ni->block_count - 1;
491
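/* nblock goes negative at the (count + 1)-th good block, which is the block address to return */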
492 while (ba < limit) {
493 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
494 nblock--;
495
496 if (nblock < 0) {
497 *nba = ba;
498 return true;
499 }
500
501 ba++;
502 }
503
504 return false;
505}
506
507/*
508 * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
509 * @ni: NMBM instance structure
510 * @ba: start physical block address
511 * @nba: return physical block address after walk
512 * @count: number of good blocks to be skipped
513 * @limit: lowest block address allowed for walking
514 *
515 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
516 * return the next good block address.
517 *
518 * If not enough good blocks are counted before @limit is reached, false will be returned.
519 *
520 * If @count == 0, the nearest good block address will be returned.
521 * @limit itself is not included in the walk.
522 */
523static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba,
524 uint32_t *nba, uint32_t count, uint32_t limit)
525{
526 int32_t nblock = count;
527
528 if (limit >= ni->block_count)
529 limit = ni->block_count - 1;
530
531 while (ba > limit) {
532 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
533 nblock--;
534
535 if (nblock < 0) {
536 *nba = ba;
537 return true;
538 }
539
540 ba--;
541 }
542
543 return false;
544}
545
546/*
547 * nmbm_block_walk - Skip specified number of good blocks from curr. block addr
548 * @ni: NMBM instance structure
549 * @ascending: whether to walk ascending
550 * @ba: start physical block address
551 * @nba: return physical block address after walk
552 * @count: number of good blocks to be skipped
553 * @limit: highest/lowest block address allowed for walking
554 *
555 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
556 * return the next good block address.
557 *
558 * If not enough good blocks are counted before @limit is reached, false will be returned.
559 *
560 * If @count == 0, the nearest good block address will be returned.
561 * @limit can be set to a negative value if no limit is required.
562 * @limit itself is not included in the walk.
563 */
564static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending,
565 uint32_t ba, uint32_t *nba, int32_t count,
566 int32_t limit)
567{
568 if (ascending)
569 return nmbm_block_walk_asc(ni, ba, nba, count, limit);
570
571 return nmbm_block_walk_desc(ni, ba, nba, count, limit);
572}
573
574/*
575 * nmbm_scan_badblocks - Scan and record all bad blocks
576 * @ni: NMBM instance structure
577 *
578 * Scan the entire lower NAND chip and record all bad blocks into the block
579 * state table.
580 */
581static void nmbm_scan_badblocks(struct nmbm_instance *ni)
582{
583 uint32_t ba;
584
585 for (ba = 0; ba < ni->block_count; ba++) {
586 if (nmbm_check_bad_phys_block(ni, ba)) {
587 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
588 nlog_info(ni, "Bad block %u [0x%08llx]\n", ba,
589 ba2addr(ni, ba));
590 }
591 }
592}
593
594/*
595 * nmbm_build_mapping_table - Build initial block mapping table
596 * @ni: NMBM instance structure
597 *
598 * The initial mapping table will be compatible with the strategy used in
599 * factory production.
600 */
601static void nmbm_build_mapping_table(struct nmbm_instance *ni)
602{
603 uint32_t pb, lb;
604
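/* Assign logical blocks sequentially to the good physical blocks below the management area */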
605 for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) {
606 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
607 continue;
608
609 /* Always map to the next good block */
610 ni->block_mapping[lb++] = pb;
611 }
612
613 ni->data_block_count = lb;
614
615 /* Unusable/Management blocks */
616 for (pb = lb; pb < ni->block_count; pb++)
617 ni->block_mapping[pb] = -1;
618}
619
620/*
621 * nmbm_erase_block_and_check - Erase a block and check its usability
622 * @ni: NMBM instance structure
623 * @ba: block address to be erased
624 *
625 * Erase a block and check its usability
626 *
627 * Return true if the block is usable, false if the erasure failed or the block
628 * has too many bitflips.
629 */
630static bool nmbm_erase_block_and_check(struct nmbm_instance *ni, uint32_t ba)
631{
632 uint64_t addr, off;
633 bool success;
634 int ret;
635
636 success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
637 if (!success)
638 return false;
639
640 if (!(ni->lower.flags & NMBM_F_EMPTY_PAGE_ECC_OK))
641 return true;
642
643 /* Check every page to make sure there aren't too many bitflips */
644
645 addr = ba2addr(ni, ba);
646
647 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
648 WATCHDOG_RESET();
649
650 ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
651 NMBM_MODE_PLACE_OOB);
652 if (ret == -EBADMSG) {
653 /*
654 * NMBM_F_EMPTY_PAGE_ECC_OK means the empty page is
655 * still protected by ECC. So reading pages with ECC
656 * enabled and -EBADMSG means there are too many
657 * bitflips that can't be recovered, and the block
658 * containing the page should be marked bad.
659 */
660 nlog_err(ni,
661 "Too many bitflips in empty page at 0x%llx\n",
662 addr + off);
663 return false;
664 }
665 }
666
667 return true;
668}
669
670/*
671 * nmbm_erase_range - Erase a range of blocks
672 * @ni: NMBM instance structure
673 * @ba: block address where the erasure will start
674 * @limit: top block address allowed for erasure
675 *
676 * Erase blocks within the specific range. Newly-found bad blocks will be
677 * marked.
678 *
679 * @limit is not counted into the allowed erasure address.
680 */
681static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba,
682 uint32_t limit)
683{
684 bool success;
685
686 while (ba < limit) {
687 WATCHDOG_RESET();
688
689 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
690 goto next_block;
691
692 /* Insurance to detect an unexpected bad block marked by the user */
693 if (nmbm_check_bad_phys_block(ni, ba)) {
694 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
695 goto next_block;
696 }
697
698 success = nmbm_erase_block_and_check(ni, ba);
699 if (success)
700 goto next_block;
701
702 nmbm_mark_phys_bad_block(ni, ba);
703 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
704
705 next_block:
706 ba++;
707 }
708}
709
710/*
711 * nmbm_write_repeated_data - Write critical data to a block with retry
712 * @ni: NMBM instance structure
713 * @ba: block address where the data will be written to
714 * @data: the data to be written
715 * @size: size of the data
716 *
717 * Write data to every page of the block. Success only if all pages within
718 * this block have been successfully written.
719 *
720 * Make sure data size is not bigger than one page.
721 *
722 * This function will write and verify every page for at most
723 * NMBM_TRY_COUNT times.
724 */
725static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba,
726 const void *data, uint32_t size)
727{
728 uint64_t addr, off;
729 bool success;
730 int ret;
731
732 if (size > ni->lower.writesize)
733 return false;
734
735 addr = ba2addr(ni, ba);
736
737 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
738 WATCHDOG_RESET();
739
740 /* Prepare page data. Fill 0xff into the unused region */
741 memcpy(ni->page_cache, data, size);
742 memset(ni->page_cache + size, 0xff, ni->rawpage_size - size);
743
744 success = nmbm_write_phys_page(ni, addr + off, ni->page_cache,
745 NULL, NMBM_MODE_PLACE_OOB);
746 if (!success)
747 return false;
748
749 /* Verify the data just written. ECC error indicates failure */
750 ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
751 NMBM_MODE_PLACE_OOB);
752 if (ret < 0)
753 return false;
754
755 if (memcmp(ni->page_cache, data, size))
756 return false;
757 }
758
759 return true;
760}
761
762/*
763 * nmbm_write_signature - Write signature to NAND chip
764 * @ni: NMBM instance structure
765 * @limit: top block address allowed for writing
766 * @signature: the signature to be written
767 * @signature_ba: the actual block address where signature is written to
768 *
769 * Write the signature within a specific range, from the last block of the chip down to @limit.
770 * At most one block will be written.
771 *
772 * @limit is not counted into the allowed write address.
773 */
774static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit,
775 const struct nmbm_signature *signature,
776 uint32_t *signature_ba)
777{
778 uint32_t ba = ni->block_count - 1;
779 bool success;
780
781 while (ba > limit) {
782 WATCHDOG_RESET();
783
784 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
785 goto next_block;
786
787 /* Insurance to detect an unexpected bad block marked by the user */
788 if (nmbm_check_bad_phys_block(ni, ba)) {
789 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
790 goto next_block;
791 }
792
793 success = nmbm_erase_block_and_check(ni, ba);
794 if (!success)
795 goto skip_bad_block;
796
797 success = nmbm_write_repeated_data(ni, ba, signature,
798 sizeof(*signature));
799 if (success) {
800 *signature_ba = ba;
801 return true;
802 }
803
804 skip_bad_block:
805 nmbm_mark_phys_bad_block(ni, ba);
806 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
807
808 next_block:
809 ba--;
810 }
811
812 return false;
813}
814
815/*
816 * nmbn_read_data - Read data
817 * @ni: NMBM instance structure
818 * @addr: linear address where the data will be read from
819 * @data: the data to be read
820 * @size: the size of data
821 *
822 * Read data range.
823 * Every page will be tried for at most NMBM_TRY_COUNT times.
824 *
825 * Return 0 for success, positive value for corrected bitflip count,
826 * -EBADMSG for ECC error, other negative values for other errors
827 */
828static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data,
829 uint32_t size)
830{
831 uint64_t off = addr;
832 uint8_t *ptr = data;
833 uint32_t sizeremain = size, chunksize, leading;
834 int ret;
835
836 while (sizeremain) {
837 WATCHDOG_RESET();
838
839 leading = off & ni->writesize_mask;
840 chunksize = ni->lower.writesize - leading;
841 if (chunksize > sizeremain)
842 chunksize = sizeremain;
843
844 if (chunksize == ni->lower.writesize) {
845 ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL,
846 NMBM_MODE_PLACE_OOB);
847 if (ret < 0)
848 return ret;
849 } else {
850 ret = nmbm_read_phys_page(ni, off - leading,
851 ni->page_cache, NULL,
852 NMBM_MODE_PLACE_OOB);
853 if (ret < 0)
854 return ret;
855
856 memcpy(ptr, ni->page_cache + leading, chunksize);
857 }
858
859 off += chunksize;
860 ptr += chunksize;
861 sizeremain -= chunksize;
862 }
863
864 return 0;
865}
866
867/*
868 * nmbn_write_verify_data - Write data with validation
869 * @ni: NMBM instance structure
870 * @addr: linear address where the data will be written to
871 * @data: the data to be written
872 * @size: the size of data
873 *
874 * Write data and verify.
875 * Every page will be tried for at most NMBM_TRY_COUNT times.
876 */
877static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr,
878 const void *data, uint32_t size)
879{
880 uint64_t off = addr;
881 const uint8_t *ptr = data;
882 uint32_t sizeremain = size, chunksize, leading;
883 bool success;
884 int ret;
885
886 while (sizeremain) {
887 WATCHDOG_RESET();
888
889 leading = off & ni->writesize_mask;
890 chunksize = ni->lower.writesize - leading;
891 if (chunksize > sizeremain)
892 chunksize = sizeremain;
893
894 /* Prepare page data. Fill 0xff into the unused region */
895 memset(ni->page_cache, 0xff, ni->rawpage_size);
896 memcpy(ni->page_cache + leading, ptr, chunksize);
897
898 success = nmbm_write_phys_page(ni, off - leading,
899 ni->page_cache, NULL,
900 NMBM_MODE_PLACE_OOB);
901 if (!success)
902 return false;
903
904 /* Verify the data just written. ECC error indicates failure */
905 ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache,
906 NULL, NMBM_MODE_PLACE_OOB);
907 if (ret < 0)
908 return false;
909
910 if (memcmp(ni->page_cache + leading, ptr, chunksize))
911 return false;
912
913 off += chunksize;
914 ptr += chunksize;
915 sizeremain -= chunksize;
916 }
917
918 return true;
919}
920
921/*
922 * nmbm_write_mgmt_range - Write management data into NAND within a range
923 * @ni: NMBM instance structure
924 * @ba: preferred start block address for writing
925 * @limit: highest block address allowed for writing
926 * @data: the data to be written
927 * @size: the size of data
928 * @actual_start_ba: actual start block address of data
929 * @actual_end_ba: block address after the end of data
930 *
931 * @limit is not counted into the allowed write address.
932 */
933static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba,
934 uint32_t limit, const void *data,
935 uint32_t size, uint32_t *actual_start_ba,
936 uint32_t *actual_end_ba)
937{
938 const uint8_t *ptr = data;
939 uint32_t sizeremain = size, chunksize;
940 bool success;
941
942 while (sizeremain && ba < limit) {
943 WATCHDOG_RESET();
944
945 chunksize = sizeremain;
946 if (chunksize > ni->lower.erasesize)
947 chunksize = ni->lower.erasesize;
948
949 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
950 goto next_block;
951
952 /* Insurance to detect an unexpected bad block marked by the user */
953 if (nmbm_check_bad_phys_block(ni, ba)) {
954 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
955 goto next_block;
956 }
957
958 success = nmbm_erase_block_and_check(ni, ba);
959 if (!success)
960 goto skip_bad_block;
961
962 success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr,
963 chunksize);
964 if (!success)
965 goto skip_bad_block;
966
967 if (sizeremain == size)
968 *actual_start_ba = ba;
969
970 ptr += chunksize;
971 sizeremain -= chunksize;
972
973 goto next_block;
974
975 skip_bad_block:
976 nmbm_mark_phys_bad_block(ni, ba);
977 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
978
979 next_block:
980 ba++;
981 }
982
983 if (sizeremain)
984 return false;
985
986 *actual_end_ba = ba;
987
988 return true;
989}
990
991/*
992 * nmbm_generate_info_table_cache - Generate info table cache data
993 * @ni: NMBM instance structure
994 *
995 * Generate info table cache data to be written into flash.
996 */
997static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni)
998{
999 bool changed = false;
1000
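/* Cache layout: header at offset 0, block state table at state_table_off, block mapping table at mapping_table_off; unused bytes stay 0xff */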
1001 memset(ni->info_table_cache, 0xff, ni->info_table_size);
1002
1003 memcpy(ni->info_table_cache + ni->info_table.state_table_off,
1004 ni->block_state, ni->state_table_size);
1005
1006 memcpy(ni->info_table_cache + ni->info_table.mapping_table_off,
1007 ni->block_mapping, ni->mapping_table_size);
1008
1009 ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE;
1010 ni->info_table.header.version = NMBM_VER;
1011 ni->info_table.header.size = ni->info_table_size;
1012
1013 if (ni->block_state_changed || ni->block_mapping_changed) {
1014 ni->info_table.write_count++;
1015 changed = true;
1016 }
1017
1018 memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table));
1019
1020 nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache);
1021
1022 return changed;
1023}
1024
1025/*
1026 * nmbm_write_info_table - Write info table into NAND within a range
1027 * @ni: NMBM instance structure
1028 * @ba: preferred start block address for writing
1029 * @limit: highest block address allowed for writing
1030 * @actual_start_ba: actual start block address of info table
1031 * @actual_end_ba: block address after the end of info table
1032 *
1033 * @limit is not counted into the allowed write address.
1034 */
1035static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba,
1036 uint32_t limit, uint32_t *actual_start_ba,
1037 uint32_t *actual_end_ba)
1038{
1039 return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache,
1040 ni->info_table_size, actual_start_ba,
1041 actual_end_ba);
1042}
1043
1044/*
1045 * nmbm_mark_tables_clean - Mark info table `clean'
1046 * @ni: NMBM instance structure
1047 */
1048static void nmbm_mark_tables_clean(struct nmbm_instance *ni)
1049{
1050 ni->block_state_changed = 0;
1051 ni->block_mapping_changed = 0;
1052}
1053
1054/*
1055 * nmbm_try_reserve_blocks - Reserve blocks, compromising on the count if needed
1056 * @ni: NMBM instance structure
1057 * @ba: start physical block address
1058 * @nba: return physical block address after reservation
1059 * @count: number of good blocks to be skipped
1060 * @min_count: minimum number of good blocks to be skipped
1061 * @limit: highest/lowest block address allowed for walking
1062 *
1063 * Reserve the specified number of blocks. If that fails, try to reserve as many as possible.
1064 */
1065static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba,
1066 uint32_t *nba, uint32_t count,
1067 int32_t min_count, int32_t limit)
1068{
1069 int32_t nblocks = count;
1070 bool success;
1071
1072 while (nblocks >= min_count) {
1073 success = nmbm_block_walk(ni, true, ba, nba, nblocks, limit);
1074 if (success)
1075 return true;
1076
1077 nblocks--;
1078 }
1079
1080 return false;
1081}
1082
1083/*
1084 * nmbm_rebuild_info_table - Build main & backup info table from scratch
1085 * @ni: NMBM instance structure
1087 */
1088static bool nmbm_rebuild_info_table(struct nmbm_instance *ni)
1089{
1090 uint32_t table_start_ba, table_end_ba, next_start_ba;
1091 uint32_t main_table_end_ba;
1092 bool success;
1093
1094 /* Set initial value */
1095 ni->main_table_ba = 0;
1096 ni->backup_table_ba = 0;
1097 ni->mapping_blocks_ba = ni->mapping_blocks_top_ba;
1098
1099 /* Write main table */
1100 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1101 ni->mapping_blocks_top_ba,
1102 &table_start_ba, &table_end_ba);
1103 if (!success) {
1104 /* Failed to write main table, data will be lost */
1105 nlog_emerg(ni, "Unable to write at least one info table!\n");
1106 nlog_emerg(ni, "Please save your data before power off!\n");
1107 ni->protected = 1;
1108 return false;
1109 }
1110
1111 /* Main info table is successfully written, record its offset */
1112 ni->main_table_ba = table_start_ba;
1113 main_table_end_ba = table_end_ba;
1114
1115 /* Adjust mapping_blocks_ba */
1116 ni->mapping_blocks_ba = table_end_ba;
1117
1118 nmbm_mark_tables_clean(ni);
1119
1120 nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1121
1122 /* Reserve spare blocks for main info table. */
1123 success = nmbm_try_reserve_blocks(ni, table_end_ba,
1124 &next_start_ba,
1125 ni->info_table_spare_blocks, 0,
1126 ni->mapping_blocks_top_ba -
1127 size2blk(ni, ni->info_table_size));
1128 if (!success) {
1129 /* There is no spare block. */
1130 nlog_debug(ni, "No room for backup info table\n");
1131 return true;
1132 }
1133
1134 /* Write backup info table. */
1135 success = nmbm_write_info_table(ni, next_start_ba,
1136 ni->mapping_blocks_top_ba,
1137 &table_start_ba, &table_end_ba);
1138 if (!success) {
1139 /* There are not enough blocks for the backup table. */
1140 nlog_debug(ni, "No room for backup info table\n");
1141 return true;
1142 }
1143
1144 /* Backup table is successfully written, record its offset */
1145 ni->backup_table_ba = table_start_ba;
1146
1147 /* Adjust mapping_blocks_off */
1148 ni->mapping_blocks_ba = table_end_ba;
1149
1150 /* Erase spare blocks of main table to clean possible interference data */
1151 nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
1152
1153 nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1154
1155 return true;
1156}
1157
1158/*
1159 * nmbm_rescue_single_info_table - Rescue when there is only one info table
1160 * @ni: NMBM instance structure
1161 *
1162 * This function is called when only one info table exists.
1163 * This function may fail if we can't write a new info table.
1164 */
1165static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni)
1166{
1167 uint32_t table_start_ba, table_end_ba, write_ba;
1168 bool success;
1169
1170 /* Try to write new info table in front of existing table */
1171 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1172 ni->main_table_ba,
1173 &table_start_ba,
1174 &table_end_ba);
1175 if (success) {
1176 /*
1177 * New table becomes the main table, existing table becomes
1178 * the backup table.
1179 */
1180 ni->backup_table_ba = ni->main_table_ba;
1181 ni->main_table_ba = table_start_ba;
1182
1183 nmbm_mark_tables_clean(ni);
1184
1185 /* Erase spare blocks of main table to clean possible interference data */
1186 nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba);
1187
1188 nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1189
1190 return true;
1191 }
1192
1193 /* Try to reserve spare blocks for existing table */
1194 success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1195 ni->info_table_spare_blocks, 0,
1196 ni->mapping_blocks_top_ba -
1197 size2blk(ni, ni->info_table_size));
1198 if (!success) {
1199 nlog_warn(ni, "Failed to rescue single info table\n");
1200 return false;
1201 }
1202
1203 /* Try to write new info table next to the existing table */
1204 while (write_ba >= ni->mapping_blocks_ba) {
1205 WATCHDOG_RESET();
1206
1207 success = nmbm_write_info_table(ni, write_ba,
1208 ni->mapping_blocks_top_ba,
1209 &table_start_ba,
1210 &table_end_ba);
1211 if (success)
1212 break;
1213
1214 write_ba--;
1215 }
1216
1217 if (success) {
1218 /* Erase spare blocks of main table to clean possible interference data */
1219 nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba);
1220
1221 /* New table becomes the backup table */
1222 ni->backup_table_ba = table_start_ba;
1223 ni->mapping_blocks_ba = table_end_ba;
1224
1225 nmbm_mark_tables_clean(ni);
1226
1227 nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1228
1229 return true;
1230 }
1231
1232 nlog_warn(ni, "Failed to rescue single info table\n");
1233 return false;
1234}
1235
1236/*
1237 * nmbm_update_single_info_table - Update one specific info table
1238 * @ni: NMBM instance structure
1239 */
1240static bool nmbm_update_single_info_table(struct nmbm_instance *ni,
1241 bool update_main_table)
1242{
1243 uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba;
1244 bool success;
1245
1246 /* Determine the write range */
1247 if (update_main_table) {
1248 write_start_ba = ni->main_table_ba;
1249 write_limit = ni->backup_table_ba;
1250 } else {
1251 write_start_ba = ni->backup_table_ba;
1252 write_limit = ni->mapping_blocks_top_ba;
1253 }
1254
1255 nmbm_mark_block_color_mgmt(ni, write_start_ba, write_limit - 1);
1256
1257 success = nmbm_write_info_table(ni, write_start_ba, write_limit,
1258 &table_start_ba, &table_end_ba);
1259 if (success) {
1260 if (update_main_table) {
1261 ni->main_table_ba = table_start_ba;
1262 } else {
1263 ni->backup_table_ba = table_start_ba;
1264 ni->mapping_blocks_ba = table_end_ba;
1265 }
1266
1267 nmbm_mark_tables_clean(ni);
1268
1269 nlog_table_update(ni, update_main_table, table_start_ba,
1270 table_end_ba);
1271
1272 return true;
1273 }
1274
1275 if (update_main_table) {
1276 /*
1277 * If failed to update main table, make backup table the new
1278 * main table, and call nmbm_rescue_single_info_table()
1279 */
1280 nlog_warn(ni, "Unable to update %s info table\n",
1281 update_main_table ? "Main" : "Backup");
1282
1283 ni->main_table_ba = ni->backup_table_ba;
1284 ni->backup_table_ba = 0;
1285 return nmbm_rescue_single_info_table(ni);
1286 }
1287
1288 /* Only one table left */
1289 ni->mapping_blocks_ba = ni->backup_table_ba;
1290 ni->backup_table_ba = 0;
1291
1292 return false;
1293}
1294
1295/*
1296 * nmbm_rescue_main_info_table - Rescue when failed to write main info table
1297 * @ni: NMBM instance structure
1298 *
1299 * This function is called when main info table failed to be written, and
1300 * backup info table exists.
1301 */
1302static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni)
1303{
1304 uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba;
1305 uint32_t main_table_end_ba, write_ba;
1306 uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size);
1307 bool success;
1308
1309 /* Try to reserve spare blocks for existing backup info table */
1310 success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1311 ni->info_table_spare_blocks, 0,
1312 ni->mapping_blocks_top_ba -
1313 info_table_erasesize);
1314 if (!success) {
1315 /* There is no spare block. Backup info table becomes the main table. */
1316 nlog_err(ni, "No room for temporary info table\n");
1317 ni->main_table_ba = ni->backup_table_ba;
1318 ni->backup_table_ba = 0;
1319 return true;
1320 }
1321
1322 /* Try to write temporary info table into spare unmapped blocks */
1323 while (write_ba >= ni->mapping_blocks_ba) {
1324 WATCHDOG_RESET();
1325
1326 success = nmbm_write_info_table(ni, write_ba,
1327 ni->mapping_blocks_top_ba,
1328 &tmp_table_start_ba,
1329 &tmp_table_end_ba);
1330 if (success)
1331 break;
1332
1333 write_ba--;
1334 }
1335
1336 if (!success) {
1337 /* Backup info table becomes the main table */
1338 nlog_err(ni, "Failed to update main info table\n");
1339 ni->main_table_ba = ni->backup_table_ba;
1340 ni->backup_table_ba = 0;
1341 return true;
1342 }
1343
1344 /* Adjust mapping_blocks_off */
1345 ni->mapping_blocks_ba = tmp_table_end_ba;
1346
1347 nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1348 tmp_table_end_ba - 1);
1349
1350 /*
1351 * Now write main info table at the beginning of management area.
1352 * This operation will generally destroy the original backup info
1353 * table.
1354 */
1355 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1356 tmp_table_start_ba,
1357 &main_table_start_ba,
1358 &main_table_end_ba);
1359 if (!success) {
1360 /* Temporary info table becomes the main table */
1361 ni->main_table_ba = tmp_table_start_ba;
1362 ni->backup_table_ba = 0;
1363
1364 nmbm_mark_tables_clean(ni);
1365
1366 nlog_err(ni, "Failed to update main info table\n");
1367 nmbm_mark_block_color_info_table(ni, tmp_table_start_ba,
1368 tmp_table_end_ba - 1);
1369
1370 return true;
1371 }
1372
1373 /* Main info table has been successfully written, record its offset */
1374 ni->main_table_ba = main_table_start_ba;
1375
1376 nmbm_mark_tables_clean(ni);
1377
1378 nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba);
1379
1380 /*
1381 * Temporary info table becomes the new backup info table if it's
1382 * not overwritten.
1383 */
1384 if (main_table_end_ba <= tmp_table_start_ba) {
1385 ni->backup_table_ba = tmp_table_start_ba;
1386
1387 nlog_table_creation(ni, false, tmp_table_start_ba,
1388 tmp_table_end_ba);
1389
1390 return true;
1391 }
1392
1393 /* Adjust mapping_blocks_off */
1394 ni->mapping_blocks_ba = main_table_end_ba;
1395
1396 /* Try to reserve spare blocks for new main info table */
1397 success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba,
1398 ni->info_table_spare_blocks, 0,
1399 ni->mapping_blocks_top_ba -
1400 info_table_erasesize);
1401 if (!success) {
1402 /* There is no spare block. Only main table exists. */
1403 nlog_err(ni, "No room for backup info table\n");
1404 ni->backup_table_ba = 0;
1405 return true;
1406 }
1407
1408 /* Write new backup info table. */
1409 while (write_ba >= main_table_end_ba) {
1410 WATCHDOG_RESET();
1411
1412 success = nmbm_write_info_table(ni, write_ba,
1413 ni->mapping_blocks_top_ba,
1414 &tmp_table_start_ba,
1415 &tmp_table_end_ba);
1416 if (success)
1417 break;
1418
1419 write_ba--;
1420 }
1421
1422 if (!success) {
1423 nlog_err(ni, "No room for backup info table\n");
1424 ni->backup_table_ba = 0;
1425 return true;
1426 }
1427
1428 /* Backup info table has been successfully written, record its offset */
1429 ni->backup_table_ba = tmp_table_start_ba;
1430
1431 /* Adjust mapping_blocks_off */
1432 ni->mapping_blocks_ba = tmp_table_end_ba;
1433
1434 /* Erase spare blocks of main table to clean possible interference data */
1435 nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);
1436
1437 nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba);
1438
1439 return true;
1440}
1441
1442/*
1443 * nmbm_update_info_table_once - Update info table once
1444 * @ni: NMBM instance structure
1445 * @force: force update
1446 *
1447 * Update both main and backup info table. Return true if at least one info
1448 * table has been successfully written.
1449 * This function only tries to update the info table once, regardless of the result.
1450 */
1451static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force)
1452{
1453 uint32_t table_start_ba, table_end_ba;
1454 uint32_t main_table_limit;
1455 bool success;
1456
1457 /* Do nothing if there is no change */
1458 if (!nmbm_generate_info_table_cache(ni) && !force)
1459 return true;
1460
1461 /* Check whether both two tables exist */
1462 if (!ni->backup_table_ba) {
1463 main_table_limit = ni->mapping_blocks_top_ba;
1464 goto write_main_table;
1465 }
1466
1467 nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
1468 ni->mapping_blocks_ba - 1);
1469
1470 /*
1471 * Write backup info table in its current range.
1472 * Note that the limit is set to mapping_blocks_top_ba to provide as many
1473 * spare blocks as possible for the backup table. If in the end some
1474 * unmapped blocks are used by the backup table, mapping_blocks_ba will
1475 * be adjusted.
1476 */
1477 success = nmbm_write_info_table(ni, ni->backup_table_ba,
1478 ni->mapping_blocks_top_ba,
1479 &table_start_ba, &table_end_ba);
1480 if (!success) {
1481 /*
1482 * There is nothing to do if failed to write backup table.
1483 * Write the main table now.
1484 */
1485 nlog_err(ni, "No room for backup table\n");
1486 ni->mapping_blocks_ba = ni->backup_table_ba;
1487 ni->backup_table_ba = 0;
1488 main_table_limit = ni->mapping_blocks_top_ba;
1489 goto write_main_table;
1490 }
1491
1492 /* Backup table is successfully written, record its offset */
1493 ni->backup_table_ba = table_start_ba;
1494
1495 /* Adjust mapping_blocks_off */
1496 ni->mapping_blocks_ba = table_end_ba;
1497
1498 nmbm_mark_tables_clean(ni);
1499
1500 /* The normal limit of main table */
1501 main_table_limit = ni->backup_table_ba;
1502
1503 nlog_table_update(ni, false, table_start_ba, table_end_ba);
1504
1505write_main_table:
1506 if (!ni->main_table_ba)
1507 goto rebuild_tables;
1508
1509 if (!ni->backup_table_ba)
1510 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1511 ni->mapping_blocks_ba - 1);
1512 else
1513 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
1514 ni->backup_table_ba - 1);
1515
1516 /* Write main info table in its current range */
1517 success = nmbm_write_info_table(ni, ni->main_table_ba,
1518 main_table_limit, &table_start_ba,
1519 &table_end_ba);
1520 if (!success) {
1521 /* If failed to write main table, go rescue procedure */
1522 if (!ni->backup_table_ba)
1523 goto rebuild_tables;
1524
1525 return nmbm_rescue_main_info_table(ni);
1526 }
1527
1528 /* Main info table is successfully written, record its offset */
1529 ni->main_table_ba = table_start_ba;
1530
1531 /* Adjust mapping_blocks_off */
1532 if (!ni->backup_table_ba)
1533 ni->mapping_blocks_ba = table_end_ba;
1534
1535 nmbm_mark_tables_clean(ni);
1536
1537 nlog_table_update(ni, true, table_start_ba, table_end_ba);
1538
1539 return true;
1540
1541rebuild_tables:
1542 return nmbm_rebuild_info_table(ni);
1543}
1544
1545/*
1546 * nmbm_update_info_table - Update info table
1547 * @ni: NMBM instance structure
1548 *
1549 * Update both main and backup info table. Return true if at least one table
1550 * has been successfully written.
1551 * This function will try to update the info table repeatedly until no new bad
1552 * blocks are found during updating.
1553 */
1554static bool nmbm_update_info_table(struct nmbm_instance *ni)
1555{
1556 bool success;
1557
1558 if (ni->protected)
1559 return true;
1560
1561 while (ni->block_state_changed || ni->block_mapping_changed) {
1562 success = nmbm_update_info_table_once(ni, false);
1563 if (!success) {
1564 nlog_err(ni, "Failed to update info table\n");
1565 return false;
1566 }
1567 }
1568
1569 return true;
1570}
1571
1572/*
1573 * nmbm_map_block - Map a bad block to an unused spare block
1574 * @ni: NMBM instance structure
1575 * @lb: logical block address to map
1576 */
1577static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb)
1578{
1579 uint32_t pb;
1580 bool success;
1581
1582 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
1583 nlog_warn(ni, "No spare unmapped blocks.\n");
1584 return false;
1585 }
1586
1587 success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0,
1588 ni->mapping_blocks_ba);
1589 if (!success) {
1590 nlog_warn(ni, "No spare unmapped blocks.\n");
1591 nmbm_update_info_table(ni);
1592 ni->mapping_blocks_top_ba = ni->mapping_blocks_ba;
1593 return false;
1594 }
1595
1596 ni->block_mapping[lb] = pb;
1597 ni->mapping_blocks_top_ba--;
1598 ni->block_mapping_changed++;
1599
1600 nlog_info(ni, "Logic block %u mapped to physical block %u\n", lb, pb);
1601 nmbm_mark_block_color_mapped(ni, pb);
1602
1603 return true;
1604}
1605
1606/*
1607 * nmbm_create_info_table - Create info table(s)
1608 * @ni: NMBM instance structure
1609 *
1610 * This function assumes that the chip has no existing info table(s)
1611 */
1612static bool nmbm_create_info_table(struct nmbm_instance *ni)
1613{
1614 uint32_t lb;
1615 bool success;
1616
1617 /* Set initial mapping_blocks_top_off */
1618 success = nmbm_block_walk(ni, false, ni->signature_ba,
1619 &ni->mapping_blocks_top_ba, 1,
1620 ni->mgmt_start_ba);
1621 if (!success) {
1622 nlog_err(ni, "No room for spare blocks\n");
1623 return false;
1624 }
1625
1626 /* Generate info table cache */
1627 nmbm_generate_info_table_cache(ni);
1628
1629 /* Write info table */
1630 success = nmbm_rebuild_info_table(ni);
1631 if (!success) {
1632 nlog_err(ni, "Failed to build info tables\n");
1633 return false;
1634 }
1635
1636 /* Remap bad block(s) at end of data area */
1637 for (lb = ni->data_block_count; lb < ni->mgmt_start_ba; lb++) {
1638 success = nmbm_map_block(ni, lb);
1639 if (!success)
1640 break;
1641
1642 ni->data_block_count++;
1643 }
1644
1645 /* If state table and/or mapping table changed, update info table. */
1646 success = nmbm_update_info_table(ni);
1647 if (!success)
1648 return false;
1649
1650 return true;
1651}
1652
1653/*
1654 * nmbm_create_new - Create NMBM on a new chip
1655 * @ni: NMBM instance structure
1656 */
1657static bool nmbm_create_new(struct nmbm_instance *ni)
1658{
1659 bool success;
1660
1661 /* Determine the boundary of management blocks */
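/* max_ratio is expressed in units of 1/NMBM_MGMT_DIV of the total block count */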
1662 ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->lower.max_ratio) / NMBM_MGMT_DIV;
1663
1664 if (ni->lower.max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->lower.max_reserved_blocks)
1665 ni->mgmt_start_ba = ni->block_count - ni->lower.max_reserved_blocks;
1666
1667 nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n",
1668 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
1669 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba, ni->block_count - 1);
1670
1671 /* Fill block state table & mapping table */
1672 nmbm_scan_badblocks(ni);
1673 nmbm_build_mapping_table(ni);
1674
1675 /* Write signature */
1676 ni->signature.header.magic = NMBM_MAGIC_SIGNATURE;
1677 ni->signature.header.version = NMBM_VER;
1678 ni->signature.header.size = sizeof(ni->signature);
1679 ni->signature.nand_size = ni->lower.size;
1680 ni->signature.block_size = ni->lower.erasesize;
1681 ni->signature.page_size = ni->lower.writesize;
1682 ni->signature.spare_size = ni->lower.oobsize;
1683 ni->signature.mgmt_start_pb = ni->mgmt_start_ba;
1684 ni->signature.max_try_count = NMBM_TRY_COUNT;
1685 nmbm_update_checksum(&ni->signature.header);
1686
1687 success = nmbm_write_signature(ni, ni->mgmt_start_ba,
1688 &ni->signature, &ni->signature_ba);
1689 if (!success) {
1690 nlog_err(ni, "Failed to write signature to a proper offset\n");
1691 return false;
1692 }
1693
1694 nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n",
1695 ni->signature_ba, ba2addr(ni, ni->signature_ba));
1696 nmbm_mark_block_color_signature(ni, ni->signature_ba);
1697
1698 /* Write info table(s) */
1699 success = nmbm_create_info_table(ni);
1700 if (success) {
1701 nlog_info(ni, "NMBM has been successfully created\n");
1702 return true;
1703 }
1704
1705 return false;
1706}
1707
1708/*
1709 * nmbm_check_info_table_header - Check if an info table header is valid
1710 * @ni: NMBM instance structure
1711 * @data: pointer to the info table header
1712 */
1713static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data)
1714{
1715 struct nmbm_info_table_header *ifthdr = data;
1716
1717 if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE)
1718 return false;
1719
1720 if (ifthdr->header.size != ni->info_table_size)
1721 return false;
1722
1723 if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size)
1724 return false;
1725
1726 if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size)
1727 return false;
1728
1729 return true;
1730}
1731
1732/*
1733 * nmbm_check_info_table - Check if a whole info table is valid
1734 * @ni: NMBM instance structure
1735 * @start_ba: start block address of this table
1736 * @end_ba: block address after the end of this table
1737 * @data: pointer to the info table header
1738 * @mapping_blocks_top_ba: return the block address of top remapped block
1739 */
1740static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba,
1741 uint32_t end_ba, void *data,
1742 uint32_t *mapping_blocks_top_ba)
1743{
1744 struct nmbm_info_table_header *ifthdr = data;
1745 int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off);
1746 nmbm_bitmap_t *block_state = (nmbm_bitmap_t *)((uintptr_t)data + ifthdr->state_table_off);
1747 uint32_t minimum_mapping_pb = ni->signature_ba;
1748 uint32_t ba;
1749
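/* A valid mapping must not point into the management area below the end of this table, nor at the signature block; also track the lowest physical block used for remapping */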
1750 for (ba = 0; ba < ni->data_block_count; ba++) {
1751 if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) ||
1752 block_mapping[ba] == ni->signature_ba)
1753 return false;
1754
1755 if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb)
1756 minimum_mapping_pb = block_mapping[ba];
1757 }
1758
1759 for (ba = start_ba; ba < end_ba; ba++) {
1760 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1761 continue;
1762
1763 if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD)
1764 return false;
1765 }
1766
1767 *mapping_blocks_top_ba = minimum_mapping_pb - 1;
1768
1769 return true;
1770}
1771
1772/*
1773 * nmbm_try_load_info_table - Try to load info table from an address
1774 * @ni: NMBM instance structure
1775 * @ba: start block address of the info table
1776 * @eba: return the block address after end of the table
1777 * @write_count: return the write count of this table
1778 * @mapping_blocks_top_ba: return the block address of top remapped block
1779 * @table_loaded: used to record whether ni->info_table has valid data
1780 */
1781static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba,
1782 uint32_t *eba, uint32_t *write_count,
1783 uint32_t *mapping_blocks_top_ba,
1784 bool table_loaded)
1785{
1786 struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache;
1787 uint8_t *off = ni->info_table_cache;
1788 uint32_t limit = ba + size2blk(ni, ni->info_table_size);
1789 uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size;
1790 bool success, checkhdr = true;
1791 int ret;
1792
1793 while (sizeremain && ba < limit) {
1794 WATCHDOG_RESET();
1795
1796 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
1797 goto next_block;
1798
1799 if (nmbm_check_bad_phys_block(ni, ba)) {
1800 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1801 goto next_block;
1802 }
1803
1804 chunksize = sizeremain;
1805 if (chunksize > ni->lower.erasesize)
1806 chunksize = ni->lower.erasesize;
1807
1808 /* Assume block with ECC error has no info table data */
1809 ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize);
1810 if (ret < 0)
1811 goto skip_bad_block;
1812 else if (ret > 0)
1813 return false;
1814
1815 if (checkhdr) {
1816 success = nmbm_check_info_table_header(ni, off);
1817 if (!success)
1818 return false;
1819
1820 start_ba = ba;
1821 checkhdr = false;
1822 }
1823
1824 off += chunksize;
1825 sizeremain -= chunksize;
1826
1827 goto next_block;
1828
1829 skip_bad_block:
1830 /* Only mark bad in memory */
1831 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
1832
1833 next_block:
1834 ba++;
1835 }
1836
1837 if (sizeremain)
1838 return false;
1839
1840 success = nmbm_check_header(ni->info_table_cache, ni->info_table_size);
1841 if (!success)
1842 return false;
1843
1844 *eba = ba;
1845 *write_count = ifthdr->write_count;
1846
1847 success = nmbm_check_info_table(ni, start_ba, ba, ni->info_table_cache,
1848 mapping_blocks_top_ba);
1849 if (!success)
1850 return false;
1851
1852 if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) {
1853 memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table));
1854 memcpy(ni->block_state,
1855 (uint8_t *)ifthdr + ifthdr->state_table_off,
1856 ni->state_table_size);
1857 memcpy(ni->block_mapping,
1858 (uint8_t *)ifthdr + ifthdr->mapping_table_off,
1859 ni->mapping_table_size);
1860 ni->info_table.write_count = ifthdr->write_count;
1861 }
1862
1863 return true;
1864}
1865
1866/*
1867 * nmbm_search_info_table - Search info table from specific address
1868 * @ni: NMBM instance structure
1869 * @ba: start block address to search
1870 * @limit: highest block address allowed for searching
1871 * @table_start_ba: return the start block address of this table
1872 * @table_end_ba: return the block address after end of this table
1873 * @write_count: return the write count of this table
1874 * @mapping_blocks_top_ba: return the block address of top remapped block
1875 * @table_loaded: used to record whether ni->info_table has valid data
1876 */
1877static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba,
1878 uint32_t limit, uint32_t *table_start_ba,
1879 uint32_t *table_end_ba,
1880 uint32_t *write_count,
1881 uint32_t *mapping_blocks_top_ba,
1882 bool table_loaded)
1883{
1884 bool success;
1885
1886 while (ba < limit - size2blk(ni, ni->info_table_size)) {
1887 WATCHDOG_RESET();
1888
1889 success = nmbm_try_load_info_table(ni, ba, table_end_ba,
1890 write_count,
1891 mapping_blocks_top_ba,
1892 table_loaded);
1893 if (success) {
1894 *table_start_ba = ba;
1895 return true;
1896 }
1897
1898 ba++;
1899 }
1900
1901 return false;
1902}
1903
1904/*
1905 * nmbm_load_info_table - Load info table(s) from a chip
1906 * @ni: NMBM instance structure
1907 * @ba: start block address to search info table
1908 * @limit: highest block address allowed for searching
1909 */
1910static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba,
1911 uint32_t limit)
1912{
1913 uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba;
1914 uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba;
1915 uint32_t main_table_write_count, backup_table_write_count;
1916 uint32_t i;
1917 bool success;
1918
1919 /* Set initial value */
1920 ni->main_table_ba = 0;
1921 ni->backup_table_ba = 0;
1922 ni->info_table.write_count = 0;
1923 ni->mapping_blocks_top_ba = ni->signature_ba - 1;
1924 ni->data_block_count = ni->signature.mgmt_start_pb;
1925
1926 /* Find first info table */
1927 success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba,
1928 &main_table_end_ba, &main_table_write_count,
1929 &main_mapping_blocks_top_ba, false);
1930 if (!success) {
1931 nlog_warn(ni, "No valid info table found\n");
1932 return false;
1933 }
1934
1935 table_end_ba = main_table_end_ba;
1936
1937 nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba,
1938 main_table_end_ba);
1939
1940 /* Find second info table */
1941 success = nmbm_search_info_table(ni, main_table_end_ba, limit,
1942 &ni->backup_table_ba, &backup_table_end_ba,
1943 &backup_table_write_count, &backup_mapping_blocks_top_ba, true);
1944 if (!success) {
1945 nlog_warn(ni, "Second info table not found\n");
1946 } else {
1947 table_end_ba = backup_table_end_ba;
1948
1949 nlog_table_found(ni, false, backup_table_write_count,
1950 ni->backup_table_ba, backup_table_end_ba);
1951 }
1952
1953 /* Pick mapping_blocks_top_ba */
1954 if (!ni->backup_table_ba) {
1955 ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
1956 } else {
1957 if (main_table_write_count >= backup_table_write_count)
1958 ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
1959 else
1960 ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba;
1961 }
1962
1963 /* Set final mapping_blocks_ba */
1964 ni->mapping_blocks_ba = table_end_ba;
1965
1966 /* Set final data_block_count */
1967 for (i = ni->signature.mgmt_start_pb; i > 0; i--) {
1968 if (ni->block_mapping[i - 1] >= 0) {
1969 ni->data_block_count = i;
1970 break;
1971 }
1972 }
1973
1974 /* Debug purpose: mark mapped blocks and bad blocks */
1975 for (i = 0; i < ni->data_block_count; i++) {
1976 if (ni->block_mapping[i] > ni->mapping_blocks_top_ba)
1977 nmbm_mark_block_color_mapped(ni, ni->block_mapping[i]);
1978 }
1979
1980 for (i = 0; i < ni->block_count; i++) {
1981 if (nmbm_get_block_state(ni, i) == BLOCK_ST_BAD)
1982 nmbm_mark_block_color_bad(ni, i);
1983 }
1984
1985 /* Regenerate the info table cache from the final selected info table */
1986 nmbm_generate_info_table_cache(ni);
1987
1988 /*
1989 * If only one table exists, try to write another table.
1990 * If the two tables have different write counts, try to update the outdated one
1991 */
1992 if (!ni->backup_table_ba) {
1993 success = nmbm_rescue_single_info_table(ni);
1994 } else if (main_table_write_count != backup_table_write_count) {
1995 /* Mark state & mapping tables changed */
1996 ni->block_state_changed = 1;
1997 ni->block_mapping_changed = 1;
1998
1999 success = nmbm_update_single_info_table(ni,
2000 main_table_write_count < backup_table_write_count);
2001 } else {
2002 success = true;
2003 }
2004
2005 /*
2006 * If there are no spare unmapped blocks, or if only one table still
2007 * exists, set the chip to read-only
2008 */
2009 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
2010 nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n");
2011 ni->protected = 1;
2012 } else if (!success) {
2013 nlog_warn(ni, "Only one info table found. Device is now read-only\n");
2014 ni->protected = 1;
2015 }
2016
2017 return true;
2018}
2019
2020/*
2021 * nmbm_load_existing - Load an existing NMBM from a chip
2022 * @ni: NMBM instance structure
2023 */
2024static bool nmbm_load_existing(struct nmbm_instance *ni)
2025{
2026 bool success;
2027
2028 /* Calculate the boundary of management blocks */
2029 ni->mgmt_start_ba = ni->signature.mgmt_start_pb;
2030
2031 nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n",
2032 ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
2033 nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
2034 ni->signature_ba - 1);
2035
2036 /* Look for info table(s) */
2037 success = nmbm_load_info_table(ni, ni->mgmt_start_ba,
2038 ni->signature_ba);
2039 if (success) {
2040 nlog_info(ni, "NMBM has been successfully attached\n");
2041 return true;
2042 }
2043
2044 if (!(ni->lower.flags & NMBM_F_CREATE))
2045 return false;
2046
2047 /* Fill block state table & mapping table */
2048 nmbm_scan_badblocks(ni);
2049 nmbm_build_mapping_table(ni);
2050
2051 /* Write info table(s) */
2052 success = nmbm_create_info_table(ni);
2053 if (success) {
2054 nlog_info(ni, "NMBM has been successfully created\n");
2055 return true;
2056 }
2057
2058 return false;
2059}
2060
2061/*
2062 * nmbm_find_signature - Find signature in the lower NAND chip
2063 * @ni: NMBM instance structure
2064 * @signature: used for storing the signature data found
2065 * @signature_ba: return the actual block address of the signature block
2066 *
2067 * Find a valid signature from a specific range in the lower NAND chip,
2068 * from bottom (highest address) to top (lowest address)
2069 *
2070 * Return true if found.
2071 */
2072static bool nmbm_find_signature(struct nmbm_instance *ni,
2073 struct nmbm_signature *signature,
2074 uint32_t *signature_ba)
2075{
2076 struct nmbm_signature sig;
2077 uint64_t off, addr;
2078 uint32_t block_count, ba, limit;
2079 bool success;
2080 int ret;
2081
2082 /* Calculate top and bottom block address */
2083 block_count = ni->lower.size >> ni->erasesize_shift;
2084 ba = block_count;
2085 limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->lower.max_ratio);
2086 if (ni->lower.max_reserved_blocks && block_count - limit > ni->lower.max_reserved_blocks)
2087 limit = block_count - ni->lower.max_reserved_blocks;
2088
2089 while (ba >= limit) {
2090 WATCHDOG_RESET();
2091
2092 ba--;
2093 addr = ba2addr(ni, ba);
2094
2095 if (nmbm_check_bad_phys_block(ni, ba))
2096 continue;
2097
2098 /* Check every page.
2099 * As long as at least one page contains a valid signature,
2100 * the block is treated as a valid signature block.
2101 */
2102 for (off = 0; off < ni->lower.erasesize;
2103 off += ni->lower.writesize) {
2104 WATCHDOG_RESET();
2105
2106 ret = nmbn_read_data(ni, addr + off, &sig,
2107 sizeof(sig));
2108 if (ret)
2109 continue;
2110
2111 /* Check for header size and checksum */
2112 success = nmbm_check_header(&sig, sizeof(sig));
2113 if (!success)
2114 continue;
2115
2116 /* Check for header magic */
2117 if (sig.header.magic == NMBM_MAGIC_SIGNATURE) {
2118 /* Found it */
2119 memcpy(signature, &sig, sizeof(sig));
2120 *signature_ba = ba;
2121 return true;
2122 }
2123 }
2124 }
2125
2126 return false;
2127}
2128
2129/*
2130 * is_power_of_2_u64 - Check whether a 64-bit integer is a power of 2
2131 * @n: number to check
2132 */
2133static bool is_power_of_2_u64(uint64_t n)
2134{
2135 return (n != 0 && ((n & (n - 1)) == 0));
2136}
2137
2138/*
2139 * nmbm_check_lower_members - Validate the members of lower NAND device
2140 * @nld: Lower NAND chip structure
2141 */
2142static bool nmbm_check_lower_members(struct nmbm_lower_device *nld)
2143{
2144
2145 if (!nld->size || !is_power_of_2_u64(nld->size)) {
2146 nmbm_log_lower(nld, NMBM_LOG_ERR,
2147 "Chip size %llu is not valid\n", nld->size);
2148 return false;
2149 }
2150
2151 if (!nld->erasesize || !is_power_of_2(nld->erasesize)) {
2152 nmbm_log_lower(nld, NMBM_LOG_ERR,
2153 "Block size %u is not valid\n", nld->erasesize);
2154 return false;
2155 }
2156
2157 if (!nld->writesize || !is_power_of_2(nld->writesize)) {
2158 nmbm_log_lower(nld, NMBM_LOG_ERR,
2159 "Page size %u is not valid\n", nld->writesize);
2160 return false;
2161 }
2162
2163 if (!nld->oobsize || !is_power_of_2(nld->oobsize)) {
2164 nmbm_log_lower(nld, NMBM_LOG_ERR,
2165 "Page spare size %u is not valid\n", nld->oobsize);
2166 return false;
2167 }
2168
2169 if (!nld->read_page || !nld->write_page || !nld->erase_block) {
2170 nmbm_log_lower(nld, NMBM_LOG_ERR,
2171 "read_page(), write_page() and erase_block() are required\n");
2172 return false;
2173 }
2174
2175 /* Data sanity check */
2176 if (!nld->max_ratio)
2177 nld->max_ratio = 1;
2178
2179 if (nld->max_ratio >= NMBM_MGMT_DIV - 1) {
2180 nmbm_log_lower(nld, NMBM_LOG_ERR,
2181 "max ratio %u is invalid\n", nld->max_ratio);
2182 return false;
2183 }
2184
2185 if (nld->max_reserved_blocks && nld->max_reserved_blocks < NMBM_MGMT_BLOCKS_MIN) {
2186 nmbm_log_lower(nld, NMBM_LOG_ERR,
2187 "max reserved blocks %u is too small\n", nld->max_reserved_blocks);
2188 return false;
2189 }
2190
2191 return true;
2192}
2193
2194/*
2195 * nmbm_calc_structure_size - Calculate the instance structure size
2196 * @nld: NMBM lower device structure
2197 */
2198size_t nmbm_calc_structure_size(struct nmbm_lower_device *nld)
2199{
2200 uint32_t state_table_size, mapping_table_size, info_table_size;
2201 uint32_t block_count;
2202
2203 block_count = nmbm_lldiv(nld->size, nld->erasesize);
2204
2205 /* Calculate info table size */
2206 state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2207 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2208 mapping_table_size = block_count * sizeof(int32_t);
2209
2210 info_table_size = NMBM_ALIGN(sizeof(struct nmbm_info_table_header),
2211 nld->writesize);
2212 info_table_size += NMBM_ALIGN(state_table_size, nld->writesize);
2213 info_table_size += NMBM_ALIGN(mapping_table_size, nld->writesize);
2214
2215 return info_table_size + state_table_size + mapping_table_size +
2216 nld->writesize + nld->oobsize + sizeof(struct nmbm_instance);
2217}
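
/*
 * Allocation sketch (illustrative only, not part of the original driver):
 * the caller is expected to size and allocate the instance buffer itself
 * before calling nmbm_attach(). The allocator and zeroing shown here are
 * assumptions, not requirements stated elsewhere in this file.
 *
 *	size_t len = nmbm_calc_structure_size(&nld);
 *	struct nmbm_instance *ni = malloc(len);
 *
 *	if (ni) {
 *		memset(ni, 0, len);
 *		ret = nmbm_attach(&nld, ni);
 *	}
 */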
2218
2219/*
2220 * nmbm_init_structure - Initialize members of instance structure
2221 * @ni: NMBM instance structure
2222 */
2223static void nmbm_init_structure(struct nmbm_instance *ni)
2224{
2225 uint32_t pages_per_block, blocks_per_chip;
2226 uintptr_t ptr;
2227
2228 pages_per_block = ni->lower.erasesize / ni->lower.writesize;
2229 blocks_per_chip = nmbm_lldiv(ni->lower.size, ni->lower.erasesize);
2230
2231 ni->rawpage_size = ni->lower.writesize + ni->lower.oobsize;
2232 ni->rawblock_size = pages_per_block * ni->rawpage_size;
2233 ni->rawchip_size = blocks_per_chip * ni->rawblock_size;
2234
2235 ni->writesize_mask = ni->lower.writesize - 1;
2236 ni->erasesize_mask = ni->lower.erasesize - 1;
2237
2238 ni->writesize_shift = ffs(ni->lower.writesize) - 1;
2239 ni->erasesize_shift = ffs(ni->lower.erasesize) - 1;
2240
2241 /* Calculate the number of blocks in this chip */
2242 ni->block_count = ni->lower.size >> ni->erasesize_shift;
2243
2244 /* Calculate info table size */
2245 ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2246 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2247 ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping);
2248
2249 ni->info_table_size = NMBM_ALIGN(sizeof(ni->info_table),
2250 ni->lower.writesize);
2251 ni->info_table.state_table_off = ni->info_table_size;
2252
2253 ni->info_table_size += NMBM_ALIGN(ni->state_table_size,
2254 ni->lower.writesize);
2255 ni->info_table.mapping_table_off = ni->info_table_size;
2256
2257 ni->info_table_size += NMBM_ALIGN(ni->mapping_table_size,
2258 ni->lower.writesize);
2259
2260 ni->info_table_spare_blocks = nmbm_get_spare_block_count(
2261 size2blk(ni, ni->info_table_size));
2262
2263 /* Assign memory to members */
2264 ptr = (uintptr_t)ni + sizeof(*ni);
2265
2266 ni->info_table_cache = (void *)ptr;
2267 ptr += ni->info_table_size;
2268
2269 ni->block_state = (void *)ptr;
2270 ptr += ni->state_table_size;
2271
2272 ni->block_mapping = (void *)ptr;
2273 ptr += ni->mapping_table_size;
2274
2275 ni->page_cache = (uint8_t *)ptr;
2276
2277 /* Initialize block state table */
2278 ni->block_state_changed = 0;
2279 memset(ni->block_state, 0xff, ni->state_table_size);
2280
2281 /* Initialize block mapping table */
2282 ni->block_mapping_changed = 0;
2283}
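
/*
 * Resulting layout of the single instance buffer (sketch for illustration;
 * sizes are the values computed above):
 *
 *	+--------------------------+  (uintptr_t)ni + sizeof(*ni)
 *	| info_table_cache         |  info_table_size bytes
 *	+--------------------------+
 *	| block_state              |  state_table_size bytes
 *	+--------------------------+
 *	| block_mapping            |  mapping_table_size bytes
 *	+--------------------------+
 *	| page_cache               |  writesize + oobsize bytes
 *	+--------------------------+
 */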
2284
2285/*
2286 * nmbm_attach - Attach to a lower device
2287 * @nld: NMBM lower device structure
2288 * @ni: NMBM instance structure
2289 */
2290int nmbm_attach(struct nmbm_lower_device *nld, struct nmbm_instance *ni)
2291{
2292 bool success;
2293
2294 if (!nld || !ni)
2295 return -EINVAL;
2296
2297 /* Set default log level */
2298 ni->log_display_level = NMBM_DEFAULT_LOG_LEVEL;
2299
2300 /* Check lower members */
2301 success = nmbm_check_lower_members(nld);
2302 if (!success)
2303 return -EINVAL;
2304
2305 /* Initialize NMBM instance */
2306 memcpy(&ni->lower, nld, sizeof(struct nmbm_lower_device));
2307 nmbm_init_structure(ni);
2308
2309 success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba);
2310 if (!success) {
2311 if (!(nld->flags & NMBM_F_CREATE)) {
2312 nlog_err(ni, "Signature not found\n");
2313 return -ENODEV;
2314 }
2315
2316 success = nmbm_create_new(ni);
2317 if (!success)
2318 return -ENODEV;
2319
2320 return 0;
2321 }
2322
2323 nlog_info(ni, "Signature found at block %u [0x%08llx]\n",
2324 ni->signature_ba, ba2addr(ni, ni->signature_ba));
2325 nmbm_mark_block_color_signature(ni, ni->signature_ba);
2326
2327 if (ni->signature.header.version != NMBM_VER) {
2328 nlog_err(ni, "NMBM version %u.%u is not supported\n",
2329 NMBM_VERSION_MAJOR_GET(ni->signature.header.version),
2330 NMBM_VERSION_MINOR_GET(ni->signature.header.version));
2331 return -EINVAL;
2332 }
2333
2334 if (ni->signature.nand_size != nld->size ||
2335 ni->signature.block_size != nld->erasesize ||
2336 ni->signature.page_size != nld->writesize ||
2337 ni->signature.spare_size != nld->oobsize) {
2338 nlog_err(ni, "NMBM configuration mismatch\n");
2339 return -EINVAL;
2340 }
2341
2342 success = nmbm_load_existing(ni);
2343 if (!success)
2344 return -ENODEV;
2345
2346 return 0;
2347}
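
/*
 * Caller-side sketch (hypothetical code, not part of this driver) of a
 * typical attach/detach cycle; error handling is abbreviated:
 *
 *	nld.flags |= NMBM_F_CREATE;	// optional: build tables on a blank chip
 *
 *	ret = nmbm_attach(&nld, ni);	// -ENODEV: no usable NMBM data found
 *	if (!ret) {			// -EINVAL: bad parameters or version/geometry mismatch
 *		// ... access the chip through the nmbm_* logic address APIs ...
 *		nmbm_detach(ni);	// writes back any pending info table changes
 *	}
 */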
2348
2349/*
2350 * nmbm_detach - Detach from a lower device, and save all tables
2351 * @ni: NMBM instance structure
2352 */
2353int nmbm_detach(struct nmbm_instance *ni)
2354{
2355 if (!ni)
2356 return -EINVAL;
2357
2358 nmbm_update_info_table(ni);
2359
2360 nmbm_mark_block_color_normal(ni, 0, ni->block_count - 1);
2361
2362 return 0;
2363}
2364
2365/*
2366 * nmbm_erase_logic_block - Erase a logic block
2367 * @ni: NMBM instance structure
2368 * @block_addr: logic block address
2369 *
2370 * Logic block will be mapped to physical block before erasing.
2371 * A bad block found during erasing will be remapped to a good block if there is
2372 * still at least one good spare block available.
2373 */
2374static int nmbm_erase_logic_block(struct nmbm_instance *ni, uint32_t block_addr)
2375{
2376 uint32_t pb;
2377 bool success;
2378
2379retry:
2380 /* Map logic block to physical block */
2381 pb = ni->block_mapping[block_addr];
2382
2383 /* Whether the logic block is good (has valid mapping) */
2384 if ((int32_t)pb < 0) {
2385 nlog_debug(ni, "Logic block %u is a bad block\n", block_addr);
2386 return -EIO;
2387 }
2388
2389 /* Remap logic block if current physical block is a bad block */
2390 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD ||
2391 nmbm_get_block_state(ni, pb) == BLOCK_ST_NEED_REMAP)
2392 goto remap_logic_block;
developer4f9017d2021-06-16 17:18:47 +08002393
2394 /* Safety check to detect an unexpected bad block marked by the user */
2395 if (nmbm_check_bad_phys_block(ni, pb)) {
2396 nlog_warn(ni, "Found unexpected bad block possibly marked by user\n");
2397 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2398 goto remap_logic_block;
2399 }
developer8d16ac22021-05-26 15:32:12 +08002400
developer28a313b2021-06-16 17:23:34 +08002401 success = nmbm_erase_block_and_check(ni, pb);
developer8d16ac22021-05-26 15:32:12 +08002402 if (success)
2403 return 0;
2404
2405 /* Mark bad block */
2406 nmbm_mark_phys_bad_block(ni, pb);
2407 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2408
2409remap_logic_block:
2410 /* Try to assign a new block */
2411 success = nmbm_map_block(ni, block_addr);
2412 if (!success) {
2413 /* Mark logic block unusable, and update info table */
2414 ni->block_mapping[block_addr] = -1;
2415 if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2416 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2417 nmbm_update_info_table(ni);
2418 return -EIO;
2419 }
2420
2421 /* Update info table before erasing */
2422 if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
2423 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2424 nmbm_update_info_table(ni);
2425
2426 goto retry;
2427}
2428
2429/*
2430 * nmbm_erase_block_range - Erase logic blocks
2431 * @ni: NMBM instance structure
2432 * @addr: logic linear address
2433 * @size: erase range
2434 * @failed_addr: return failed block address if error occurs
2435 */
2436int nmbm_erase_block_range(struct nmbm_instance *ni, uint64_t addr,
2437 uint64_t size, uint64_t *failed_addr)
2438{
2439 uint32_t start_ba, end_ba;
2440 int ret;
2441
2442 if (!ni)
2443 return -EINVAL;
2444
2445 /* Sanity check */
2446 if (ni->protected) {
2447 nlog_debug(ni, "Device is forced read-only\n");
2448 return -EROFS;
2449 }
2450
2451 if (addr >= ba2addr(ni, ni->data_block_count)) {
2452 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2453 return -EINVAL;
2454 }
2455
2456 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2457 nlog_err(ni, "Erase range 0xllxu is too large\n", size);
2458 return -EINVAL;
2459 }
2460
2461 if (!size) {
2462 nlog_warn(ni, "No blocks to be erased\n");
2463 return 0;
2464 }
2465
2466 start_ba = addr2ba(ni, addr);
2467 end_ba = addr2ba(ni, addr + size - 1);
2468
2469 while (start_ba <= end_ba) {
2470 WATCHDOG_RESET();
2471
2472 ret = nmbm_erase_logic_block(ni, start_ba);
2473 if (ret) {
2474 if (failed_addr)
2475 *failed_addr = ba2addr(ni, start_ba);
2476 return ret;
2477 }
2478
2479 start_ba++;
2480 }
2481
2482 return 0;
2483}
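
/*
 * Usage sketch (hypothetical caller code): addr and size are interpreted in
 * the logic address space and are expanded to whole blocks, and failed_addr
 * is only written when an erase fails.
 *
 *	uint64_t failed_addr;
 *
 *	ret = nmbm_erase_block_range(ni, addr, len, &failed_addr);
 *	if (ret)
 *		// failed_addr holds the logic address of the block that failed
 */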
2484
2485/*
2486 * nmbm_read_logic_page - Read page based on logic address
2487 * @ni: NMBM instance structure
2488 * @addr: logic linear address
2489 * @data: buffer to store main data. optional.
2490 * @oob: buffer to store oob data. optional.
2491 * @mode: read mode
developerd1457c92021-06-16 17:23:18 +08002492 *
2493 * Return 0 for success, positive value for corrected bitflip count,
2494 * -EBADMSG for ecc error, other negative values for other errors
developer8d16ac22021-05-26 15:32:12 +08002495 */
2496static int nmbm_read_logic_page(struct nmbm_instance *ni, uint64_t addr,
2497 void *data, void *oob, enum nmbm_oob_mode mode)
2498{
2499 uint32_t lb, pb, offset;
2500 uint64_t paddr;
2501 int ret;
2502
2503 /* Extract block address and in-block offset */
2504 lb = addr2ba(ni, addr);
2505 offset = addr & ni->erasesize_mask;
2506
2507 /* Map logic block to physical block */
2508 pb = ni->block_mapping[lb];
2509
2510 /* Whether the logic block is good (has valid mapping) */
2511 if ((int32_t)pb < 0) {
2512 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2513 return -EIO;
2514 }
2515
2516 /* Fail if physical block is marked bad */
2517 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2518 return -EIO;
2519
2520 /* Assemble new address */
2521 paddr = ba2addr(ni, pb) + offset;
2522
2523 ret = nmbm_read_phys_page(ni, paddr, data, oob, mode);
developerd1457c92021-06-16 17:23:18 +08002524 if (ret >= 0 || ret == -EBADMSG)
2525 return ret;
developer8d16ac22021-05-26 15:32:12 +08002526
2527 /*
2528 * Do not remap bad block here. Just mark this block in state table.
2529 * Remap this block on erasing.
2530 */
2531 nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2532 nmbm_update_info_table(ni);
2533
developerd1457c92021-06-16 17:23:18 +08002534 return ret;
developer8d16ac22021-05-26 15:32:12 +08002535}
2536
2537/*
2538 * nmbm_read_single_page - Read one page based on logic address
2539 * @ni: NMBM instance structure
2540 * @addr: logic linear address
2541 * @data: buffer to store main data. optional.
2542 * @oob: buffer to store oob data. optional.
2543 * @mode: read mode
developerd1457c92021-06-16 17:23:18 +08002544 *
2545 * Return 0 for success, positive value for corrected bitflip count,
2546 * -EBADMSG for ecc error, other negative values for other errors
developer8d16ac22021-05-26 15:32:12 +08002547 */
2548int nmbm_read_single_page(struct nmbm_instance *ni, uint64_t addr, void *data,
2549 void *oob, enum nmbm_oob_mode mode)
2550{
2551 if (!ni)
2552 return -EINVAL;
2553
2554 /* Sanity check */
2555 if (ni->protected) {
2556 nlog_debug(ni, "Device is forced read-only\n");
2557 return -EROFS;
2558 }
2559
2560 if (addr >= ba2addr(ni, ni->data_block_count)) {
2561 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2562 return -EINVAL;
2563 }
2564
2565 return nmbm_read_logic_page(ni, addr, data, oob, mode);
2566}
2567
2568/*
2569 * nmbm_read_range - Read data without oob
2570 * @ni: NMBM instance structure
2571 * @addr: logic linear address
2572 * @size: data size to read
2573 * @data: buffer to store main data to be read
2574 * @mode: read mode
2575 * @retlen: return actual data size read
developerd1457c92021-06-16 17:23:18 +08002576 *
2577 * Return 0 for success, positive value for corrected bitflip count,
2578 * -EBADMSG for ecc error, other negative values for other errors
developer8d16ac22021-05-26 15:32:12 +08002579 */
2580int nmbm_read_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2581 void *data, enum nmbm_oob_mode mode, size_t *retlen)
2582{
2583 uint64_t off = addr;
2584 uint8_t *ptr = data;
2585 size_t sizeremain = size, chunksize, leading;
developerd1457c92021-06-16 17:23:18 +08002586 bool has_ecc_err = false;
2587 int ret, max_bitflips = 0;
developer8d16ac22021-05-26 15:32:12 +08002588
2589 if (!ni)
2590 return -EINVAL;
2591
2592 /* Sanity check */
2593 if (ni->protected) {
2594 nlog_debug(ni, "Device is forced read-only\n");
2595 return -EROFS;
2596 }
2597
2598 if (addr >= ba2addr(ni, ni->data_block_count)) {
2599 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2600 return -EINVAL;
2601 }
2602
2603 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2604 nlog_err(ni, "Read range 0x%llx is too large\n", size);
2605 return -EINVAL;
2606 }
2607
2608 if (!size) {
2609 nlog_warn(ni, "No data to be read\n");
2610 return 0;
2611 }
2612
2613 while (sizeremain) {
2614 WATCHDOG_RESET();
2615
2616 leading = off & ni->writesize_mask;
2617 chunksize = ni->lower.writesize - leading;
2618 if (chunksize > sizeremain)
2619 chunksize = sizeremain;
2620
2621 if (chunksize == ni->lower.writesize) {
2622 ret = nmbm_read_logic_page(ni, off - leading, ptr,
2623 NULL, mode);
developerd1457c92021-06-16 17:23:18 +08002624 if (ret < 0 && ret != -EBADMSG)
developer8d16ac22021-05-26 15:32:12 +08002625 break;
2626 } else {
2627 ret = nmbm_read_logic_page(ni, off - leading,
2628 ni->page_cache, NULL,
2629 mode);
developerd1457c92021-06-16 17:23:18 +08002630 if (ret < 0 && ret != -EBADMSG)
developer8d16ac22021-05-26 15:32:12 +08002631 break;
2632
2633 memcpy(ptr, ni->page_cache + leading, chunksize);
2634 }
2635
developerd1457c92021-06-16 17:23:18 +08002636 if (ret == -EBADMSG)
2637 has_ecc_err = true;
2638
2639 if (ret > max_bitflips)
2640 max_bitflips = ret;
2641
developer8d16ac22021-05-26 15:32:12 +08002642 off += chunksize;
2643 ptr += chunksize;
2644 sizeremain -= chunksize;
2645 }
2646
2647 if (retlen)
2648 *retlen = size - sizeremain;
2649
developerd1457c92021-06-16 17:23:18 +08002650 if (ret < 0 && ret != -EBADMSG)
2651 return ret;
2652
2653 if (has_ecc_err)
2654 return -EBADMSG;
2655
2656 return max_bitflips;
developer8d16ac22021-05-26 15:32:12 +08002657}
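
/*
 * Return value handling sketch (hypothetical caller code), following the
 * convention documented above:
 *
 *	ret = nmbm_read_range(ni, addr, len, buf, mode, &retlen);
 *	if (ret == -EBADMSG)
 *		// data was read, but at least one page had an uncorrectable ECC error
 *	else if (ret < 0)
 *		// other I/O error; retlen holds the amount read before the failure
 *	else
 *		// success; ret is the worst per-page corrected bitflip count (0 if none)
 */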
2658
2659/*
2660 * nmbm_write_logic_page - Write page based on logic address
2661 * @ni: NMBM instance structure
2662 * @addr: logic linear address
2663 * @data: buffer containing main data. optional.
2664 * @oob: buffer containing oob data. optional.
2665 * @mode: write mode
2666 */
2667static int nmbm_write_logic_page(struct nmbm_instance *ni, uint64_t addr,
2668 const void *data, const void *oob,
2669 enum nmbm_oob_mode mode)
2670{
2671 uint32_t lb, pb, offset;
2672 uint64_t paddr;
2673 bool success;
2674
2675 /* Extract block address and in-block offset */
2676 lb = addr2ba(ni, addr);
2677 offset = addr & ni->erasesize_mask;
2678
2679 /* Map logic block to physical block */
2680 pb = ni->block_mapping[lb];
2681
2682 /* Whether the logic block is good (has valid mapping) */
2683 if ((int32_t)pb < 0) {
2684 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2685 return -EIO;
2686 }
2687
2688 /* Fail if physical block is marked bad */
2689 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2690 return -EIO;
2691
2692 /* Assemble new address */
2693 paddr = ba2addr(ni, pb) + offset;
2694
2695 success = nmbm_write_phys_page(ni, paddr, data, oob, mode);
2696 if (success)
2697 return 0;
2698
2699 /*
2700 * Do not remap bad block here. Just mark this block in state table.
2701 * Remap this block on erasing.
2702 */
2703 nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2704 nmbm_update_info_table(ni);
2705
2706 return -EIO;
2707}
2708
2709/*
2710 * nmbm_write_single_page - Write one page based on logic address
2711 * @ni: NMBM instance structure
2712 * @addr: logic linear address
2713 * @data: buffer containing main data. optional.
2714 * @oob: buffer containing oob data. optional.
2715 * @mode: write mode
2716 */
2717int nmbm_write_single_page(struct nmbm_instance *ni, uint64_t addr,
2718 const void *data, const void *oob,
2719 enum nmbm_oob_mode mode)
2720{
2721 if (!ni)
2722 return -EINVAL;
2723
2724 /* Sanity check */
2725 if (ni->protected) {
2726 nlog_debug(ni, "Device is forced read-only\n");
2727 return -EROFS;
2728 }
2729
2730 if (addr >= ba2addr(ni, ni->data_block_count)) {
2731 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2732 return -EINVAL;
2733 }
2734
2735 return nmbm_write_logic_page(ni, addr, data, oob, mode);
2736}
2737
2738/*
2739 * nmbm_write_range - Write data without oob
2740 * @ni: NMBM instance structure
2741 * @addr: logic linear address
2742 * @size: data size to write
2743 * @data: buffer containing data to be written
2744 * @mode: write mode
2745 * @retlen: return actual data size written
2746 */
2747int nmbm_write_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2748 const void *data, enum nmbm_oob_mode mode,
2749 size_t *retlen)
2750{
2751 uint64_t off = addr;
2752 const uint8_t *ptr = data;
2753 size_t sizeremain = size, chunksize, leading;
2754 int ret;
2755
2756 if (!ni)
2757 return -EINVAL;
2758
2759 /* Sanity check */
2760 if (ni->protected) {
2761 nlog_debug(ni, "Device is forced read-only\n");
2762 return -EROFS;
2763 }
2764
2765 if (addr >= ba2addr(ni, ni->data_block_count)) {
2766 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2767 return -EINVAL;
2768 }
2769
2770 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2771 nlog_err(ni, "Write size 0x%zx is too large\n", size);
2772 return -EINVAL;
2773 }
2774
2775 if (!size) {
2776 nlog_warn(ni, "No data to be written\n");
2777 return 0;
2778 }
2779
2780 while (sizeremain) {
2781 WATCHDOG_RESET();
2782
2783 leading = off & ni->writesize_mask;
2784 chunksize = ni->lower.writesize - leading;
2785 if (chunksize > sizeremain)
2786 chunksize = sizeremain;
2787
2788 if (chunksize == ni->lower.writesize) {
2789 ret = nmbm_write_logic_page(ni, off - leading, ptr,
2790 NULL, mode);
2791 if (ret)
2792 break;
2793 } else {
2794 memset(ni->page_cache, 0xff, leading);
2795 memcpy(ni->page_cache + leading, ptr, chunksize);
2796
2797 ret = nmbm_write_logic_page(ni, off - leading,
2798 ni->page_cache, NULL,
2799 mode);
2800 if (ret)
2801 break;
2802 }
2803
2804 off += chunksize;
2805 ptr += chunksize;
2806 sizeremain -= chunksize;
2807 }
2808
2809 if (retlen)
2810 *retlen = size - sizeremain;
2811
2812 return ret;
2813}
2814
2815/*
2816 * nmbm_check_bad_block - Check whether a logic block is usable
2817 * @ni: NMBM instance structure
2818 * @addr: logic linear address
2819 */
2820int nmbm_check_bad_block(struct nmbm_instance *ni, uint64_t addr)
2821{
2822 uint32_t lb, pb;
2823
2824 if (!ni)
2825 return -EINVAL;
2826
2827 if (addr >= ba2addr(ni, ni->data_block_count)) {
2828 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2829 return -EINVAL;
2830 }
2831
2832 lb = addr2ba(ni, addr);
2833
2834 /* Map logic block to physical block */
2835 pb = ni->block_mapping[lb];
2836
2837 if ((int32_t)pb < 0)
2838 return 1;
2839
2840 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2841 return 1;
2842
2843 return 0;
2844}
2845
2846/*
2847 * nmbm_mark_bad_block - Mark a logic block unusable
2848 * @ni: NMBM instance structure
2849 * @addr: logic linear address
2850 */
2851int nmbm_mark_bad_block(struct nmbm_instance *ni, uint64_t addr)
2852{
2853 uint32_t lb, pb;
2854
2855 if (!ni)
2856 return -EINVAL;
2857
2858 if (addr >= ba2addr(ni, ni->data_block_count)) {
2859 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2860 return -EINVAL;
2861 }
2862
2863 lb = addr2ba(ni, addr);
2864
2865 /* Map logic block to physical block */
2866 pb = ni->block_mapping[lb];
2867
2868 if ((int32_t)pb < 0)
2869 return 0;
2870
2871 ni->block_mapping[lb] = -1;
2872 nmbm_mark_phys_bad_block(ni, pb);
2873 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2874 nmbm_update_info_table(ni);
2875
2876 return 0;
2877}
2878
2879/*
2880 * nmbm_get_avail_size - Get available user data size
2881 * @ni: NMBM instance structure
2882 */
2883uint64_t nmbm_get_avail_size(struct nmbm_instance *ni)
2884{
2885 if (!ni)
2886 return 0;
2887
2888 return (uint64_t)ni->data_block_count << ni->erasesize_shift;
2889}
2890
2891/*
2892 * nmbm_get_lower_device - Get lower device structure
2893 * @ni: NMBM instance structure
2894 * @nld: pointer to hold the data of lower device structure
2895 */
2896int nmbm_get_lower_device(struct nmbm_instance *ni, struct nmbm_lower_device *nld)
2897{
2898 if (!ni)
2899 return -EINVAL;
2900
2901 if (nld)
2902 memcpy(nld, &ni->lower, sizeof(*nld));
2903
2904 return 0;
2905}
2906
2907#include "nmbm-debug.inl"