// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2021 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 */
7
8#include "nmbm-private.h"
9
10#include "nmbm-debug.h"
11
12#define NMBM_VER_MAJOR 1
13#define NMBM_VER_MINOR 0
14#define NMBM_VER NMBM_VERSION_MAKE(NMBM_VER_MAJOR, \
15 NMBM_VER_MINOR)
16
17#define NMBM_ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))
18
19/*****************************************************************************/
20/* Logging related functions */
21/*****************************************************************************/
22
23/*
24 * nmbm_log_lower - Print log using OS specific routine
25 * @nld: NMBM lower device structure
26 * @level: log level
27 * @fmt: format string
28 */
29static void nmbm_log_lower(struct nmbm_lower_device *nld,
30 enum nmbm_log_category level, const char *fmt, ...)
31{
32 va_list ap;
33
34 if (!nld->logprint)
35 return;
36
37 va_start(ap, fmt);
38 nld->logprint(nld->arg, level, fmt, ap);
39 va_end(ap);
40}
41
42/*
43 * nmbm_log - Print log using OS specific routine
44 * @ni: NMBM instance structure
45 * @level: log level
46 * @fmt: format string
47 */
48static void nmbm_log(struct nmbm_instance *ni, enum nmbm_log_category level,
49 const char *fmt, ...)
50{
51 va_list ap;
52
53 if (!ni)
54 return;
55
56 if (!ni->lower.logprint || level < ni->log_display_level)
57 return;
58
59 va_start(ap, fmt);
60 ni->lower.logprint(ni->lower.arg, level, fmt, ap);
61 va_end(ap);
62}
63
64/*
65 * nmbm_set_log_level - Set log display level
66 * @ni: NMBM instance structure
67 * @level: log display level
68 */
69enum nmbm_log_category nmbm_set_log_level(struct nmbm_instance *ni,
70 enum nmbm_log_category level)
71{
72 enum nmbm_log_category old;
73
74 if (!ni)
75 return __NMBM_LOG_MAX;
76
77 old = ni->log_display_level;
78 ni->log_display_level = level;
79 return old;
80}
81
/*
 * nlog_table_creation - Print log of table creation event
 * @ni: NMBM instance structure
 * @main_table: whether the table is the main info table
 * @start_ba: start block address of the table
 * @end_ba: block address after the end of the table
 */
static void nlog_table_creation(struct nmbm_instance *ni, bool main_table,
				uint32_t start_ba, uint32_t end_ba)
{
	const char *name = main_table ? "Main" : "Backup";
	uint32_t last_ba = end_ba - 1;

	if (start_ba != last_ba)
		nlog_info(ni, "%s info table has been written to block %u-%u\n",
			  name, start_ba, last_ba);
	else
		nlog_info(ni, "%s info table has been written to block %u\n",
			  name, start_ba);

	nmbm_mark_block_color_info_table(ni, start_ba, last_ba);
}
101
/*
 * nlog_table_update - Print log of table update event
 * @ni: NMBM instance structure
 * @main_table: whether the table is the main info table
 * @start_ba: start block address of the table
 * @end_ba: block address after the end of the table
 */
static void nlog_table_update(struct nmbm_instance *ni, bool main_table,
			      uint32_t start_ba, uint32_t end_ba)
{
	const char *name = main_table ? "Main" : "Backup";
	uint32_t last_ba = end_ba - 1;

	if (start_ba != last_ba)
		nlog_debug(ni, "%s info table has been updated in block %u-%u\n",
			   name, start_ba, last_ba);
	else
		nlog_debug(ni, "%s info table has been updated in block %u\n",
			   name, start_ba);

	nmbm_mark_block_color_info_table(ni, start_ba, last_ba);
}
121
/*
 * nlog_table_found - Print log of table found event
 * @ni: NMBM instance structure
 * @first_table: whether the table is the first found info table
 * @write_count: write count of the info table
 * @start_ba: start block address of the table
 * @end_ba: block address after the end of the table
 */
static void nlog_table_found(struct nmbm_instance *ni, bool first_table,
			     uint32_t write_count, uint32_t start_ba,
			     uint32_t end_ba)
{
	const char *which = first_table ? "First" : "Second";
	uint32_t last_ba = end_ba - 1;

	if (start_ba != last_ba)
		nlog_info(ni, "%s info table with writecount %u found in block %u-%u\n",
			  which, write_count, start_ba, last_ba);
	else
		nlog_info(ni, "%s info table with writecount %u found in block %u\n",
			  which, write_count, start_ba);

	nmbm_mark_block_color_info_table(ni, start_ba, last_ba);
}
145
146/*****************************************************************************/
147/* Address conversion functions */
148/*****************************************************************************/
149
150/*
151 * addr2ba - Convert a linear address to block address
152 * @ni: NMBM instance structure
153 * @addr: Linear address
154 */
155static uint32_t addr2ba(struct nmbm_instance *ni, uint64_t addr)
156{
157 return addr >> ni->erasesize_shift;
158}
159
160/*
161 * ba2addr - Convert a block address to linear address
162 * @ni: NMBM instance structure
163 * @ba: Block address
164 */
165static uint64_t ba2addr(struct nmbm_instance *ni, uint32_t ba)
166{
167 return (uint64_t)ba << ni->erasesize_shift;
168}
169/*
170 * size2blk - Get minimum required blocks for storing specific size of data
171 * @ni: NMBM instance structure
172 * @size: size for storing
173 */
174static uint32_t size2blk(struct nmbm_instance *ni, uint64_t size)
175{
176 return (size + ni->lower.erasesize - 1) >> ni->erasesize_shift;
177}
178
179/*****************************************************************************/
180/* High level NAND chip APIs */
181/*****************************************************************************/
182
183/*
184 * nmbm_reset_chip - Reset NAND device
185 * @nld: Lower NAND chip structure
186 */
187static void nmbm_reset_chip(struct nmbm_instance *ni)
188{
189 if (ni->lower.reset_chip)
190 ni->lower.reset_chip(ni->lower.arg);
191}
192
193/*
194 * nmbm_read_phys_page - Read page with retry
195 * @ni: NMBM instance structure
196 * @addr: linear address where the data will be read from
197 * @data: the main data to be read
198 * @oob: the oob data to be read
199 * @mode: mode for processing oob data
200 *
201 * Read a page for at most NMBM_TRY_COUNT times.
202 *
203 * Return 0 for success, positive value for ecc error,
204 * negative value for other errors
205 */
206static int nmbm_read_phys_page(struct nmbm_instance *ni, uint64_t addr,
207 void *data, void *oob, enum nmbm_oob_mode mode)
208{
209 int tries, ret;
210
211 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
212 ret = ni->lower.read_page(ni->lower.arg, addr, data, oob, mode);
213 if (!ret)
214 return 0;
215
216 nmbm_reset_chip(ni);
217 }
218
219 if (ret < 0)
220 nlog_err(ni, "Page read failed at address 0x%08llx\n", addr);
221
222 return ret;
223}
224
225/*
226 * nmbm_write_phys_page - Write page with retry
227 * @ni: NMBM instance structure
228 * @addr: linear address where the data will be written to
229 * @data: the main data to be written
230 * @oob: the oob data to be written
231 * @mode: mode for processing oob data
232 *
233 * Write a page for at most NMBM_TRY_COUNT times.
234 */
235static bool nmbm_write_phys_page(struct nmbm_instance *ni, uint64_t addr,
236 const void *data, const void *oob,
237 enum nmbm_oob_mode mode)
238{
239 int tries, ret;
240
241 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
242 ret = ni->lower.write_page(ni->lower.arg, addr, data, oob, mode);
243 if (!ret)
244 return true;
245
246 nmbm_reset_chip(ni);
247 }
248
249 nlog_err(ni, "Page write failed at address 0x%08llx\n", addr);
250
251 return false;
252}
253
254/*
255 * nmbm_erase_phys_block - Erase a block with retry
256 * @ni: NMBM instance structure
257 * @addr: Linear address
258 *
259 * Erase a block for at most NMBM_TRY_COUNT times.
260 */
261static bool nmbm_erase_phys_block(struct nmbm_instance *ni, uint64_t addr)
262{
263 int tries, ret;
264
265 for (tries = 0; tries < NMBM_TRY_COUNT; tries++) {
266 ret = ni->lower.erase_block(ni->lower.arg, addr);
267 if (!ret)
268 return true;
269
270 nmbm_reset_chip(ni);
271 }
272
273 nlog_err(ni, "Block erasure failed at address 0x%08llx\n", addr);
274
275 return false;
276}
277
278/*
279 * nmbm_check_bad_phys_block - Check whether a block is marked bad in OOB
280 * @ni: NMBM instance structure
281 * @ba: block address
282 */
283static bool nmbm_check_bad_phys_block(struct nmbm_instance *ni, uint32_t ba)
284{
285 uint64_t addr = ba2addr(ni, ba);
286 int ret;
287
288 if (ni->lower.is_bad_block)
289 return ni->lower.is_bad_block(ni->lower.arg, addr);
290
291 /* Treat ECC error as read success */
292 ret = nmbm_read_phys_page(ni, addr, NULL,
293 ni->page_cache + ni->lower.writesize,
294 NMBM_MODE_PLACE_OOB);
295 if (ret < 0)
296 return true;
297
298 return ni->page_cache[ni->lower.writesize] != 0xff;
299}
300
301/*
302 * nmbm_mark_phys_bad_block - Mark a block bad
303 * @ni: NMBM instance structure
304 * @addr: Linear address
305 */
306static int nmbm_mark_phys_bad_block(struct nmbm_instance *ni, uint32_t ba)
307{
308 uint64_t addr = ba2addr(ni, ba);
309 enum nmbm_log_category level;
310 uint32_t off;
311
312 nlog_info(ni, "Block %u [0x%08llx] will be marked bad\n", ba, addr);
313
314 if (ni->lower.mark_bad_block)
315 return ni->lower.mark_bad_block(ni->lower.arg, addr);
316
317 /* Whole page set to 0x00 */
318 memset(ni->page_cache, 0, ni->rawpage_size);
319
320 /* Write to all pages within this block, disable all errors */
321 level = nmbm_set_log_level(ni, __NMBM_LOG_MAX);
322
323 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
324 nmbm_write_phys_page(ni, addr + off, ni->page_cache,
325 ni->page_cache + ni->lower.writesize,
326 NMBM_MODE_RAW);
327 }
328
329 nmbm_set_log_level(ni, level);
330
331 return 0;
332}
333
334/*****************************************************************************/
335/* NMBM related functions */
336/*****************************************************************************/
337
338/*
339 * nmbm_check_header - Check whether a NMBM structure is valid
340 * @data: pointer to a NMBM structure with a NMBM header at beginning
341 * @size: Size of the buffer pointed by @header
342 *
343 * The size of the NMBM structure may be larger than NMBM header,
344 * e.g. block mapping table and block state table.
345 */
346static bool nmbm_check_header(const void *data, uint32_t size)
347{
348 const struct nmbm_header *header = data;
349 struct nmbm_header nhdr;
350 uint32_t new_checksum;
351
352 /*
353 * Make sure expected structure size is equal or smaller than
354 * buffer size.
355 */
356 if (header->size > size)
357 return false;
358
359 memcpy(&nhdr, data, sizeof(nhdr));
360
361 nhdr.checksum = 0;
362 new_checksum = nmbm_crc32(0, &nhdr, sizeof(nhdr));
363 if (header->size > sizeof(nhdr))
364 new_checksum = nmbm_crc32(new_checksum,
365 (const uint8_t *)data + sizeof(nhdr),
366 header->size - sizeof(nhdr));
367
368 if (header->checksum != new_checksum)
369 return false;
370
371 return true;
372}
373
374/*
375 * nmbm_update_checksum - Update checksum of a NMBM structure
376 * @header: pointer to a NMBM structure with a NMBM header at beginning
377 *
378 * The size of the NMBM structure must be specified by @header->size
379 */
380static void nmbm_update_checksum(struct nmbm_header *header)
381{
382 header->checksum = 0;
383 header->checksum = nmbm_crc32(0, header, header->size);
384}
385
386/*
387 * nmbm_get_spare_block_count - Calculate number of blocks should be reserved
388 * @block_count: number of blocks of data
389 *
390 * Calculate number of blocks should be reserved for data
391 */
392static uint32_t nmbm_get_spare_block_count(uint32_t block_count)
393{
394 uint32_t val;
395
396 val = (block_count + NMBM_SPARE_BLOCK_DIV / 2) / NMBM_SPARE_BLOCK_DIV;
397 val *= NMBM_SPARE_BLOCK_MULTI;
398
399 if (val < NMBM_SPARE_BLOCK_MIN)
400 val = NMBM_SPARE_BLOCK_MIN;
401
402 return val;
403}
404
405/*
406 * nmbm_get_block_state_raw - Get state of a block from raw block state table
407 * @block_state: pointer to raw block state table (bitmap)
408 * @ba: block address
409 */
410static uint32_t nmbm_get_block_state_raw(nmbm_bitmap_t *block_state,
411 uint32_t ba)
412{
413 uint32_t unit, shift;
414
415 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
416 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
417
418 return (block_state[unit] >> shift) & BLOCK_ST_MASK;
419}
420
421/*
422 * nmbm_get_block_state - Get state of a block from block state table
423 * @ni: NMBM instance structure
424 * @ba: block address
425 */
426static uint32_t nmbm_get_block_state(struct nmbm_instance *ni, uint32_t ba)
427{
428 return nmbm_get_block_state_raw(ni->block_state, ba);
429}
430
431/*
432 * nmbm_set_block_state - Set state of a block to block state table
433 * @ni: NMBM instance structure
434 * @ba: block address
435 * @state: block state
436 *
437 * Set state of a block. If the block state changed, ni->block_state_changed
438 * will be increased.
439 */
440static bool nmbm_set_block_state(struct nmbm_instance *ni, uint32_t ba,
441 uint32_t state)
442{
443 uint32_t unit, shift, orig;
444 nmbm_bitmap_t uv;
445
446 unit = ba / NMBM_BITMAP_BLOCKS_PER_UNIT;
447 shift = (ba % NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_BITS_PER_BLOCK;
448
449 orig = (ni->block_state[unit] >> shift) & BLOCK_ST_MASK;
450 state &= BLOCK_ST_MASK;
451
452 uv = ni->block_state[unit] & (~(BLOCK_ST_MASK << shift));
453 uv |= state << shift;
454 ni->block_state[unit] = uv;
455
456 if (state == BLOCK_ST_BAD)
457 nmbm_mark_block_color_bad(ni, ba);
458
459 if (orig != state) {
460 ni->block_state_changed++;
461 return true;
462 }
463
464 return false;
465}
466
467/*
468 * nmbm_block_walk_asc - Skip specified number of good blocks, ascending addr.
469 * @ni: NMBM instance structure
470 * @ba: start physical block address
471 * @nba: return physical block address after walk
472 * @count: number of good blocks to be skipped
473 * @limit: highest block address allowed for walking
474 *
475 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
476 * return the next good block address.
477 *
478 * If no enough good blocks counted while @limit reached, false will be returned.
479 *
480 * If @count == 0, nearest good block address will be returned.
481 * @limit is not counted in walking.
482 */
483static bool nmbm_block_walk_asc(struct nmbm_instance *ni, uint32_t ba,
484 uint32_t *nba, uint32_t count,
485 uint32_t limit)
486{
487 int32_t nblock = count;
488
489 if (limit >= ni->block_count)
490 limit = ni->block_count - 1;
491
492 while (ba < limit) {
493 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
494 nblock--;
495
496 if (nblock < 0) {
497 *nba = ba;
498 return true;
499 }
500
501 ba++;
502 }
503
504 return false;
505}
506
507/*
508 * nmbm_block_walk_desc - Skip specified number of good blocks, descending addr
509 * @ni: NMBM instance structure
510 * @ba: start physical block address
511 * @nba: return physical block address after walk
512 * @count: number of good blocks to be skipped
513 * @limit: lowest block address allowed for walking
514 *
515 * Start from @ba, skipping any bad blocks, counting @count good blocks, and
516 * return the next good block address.
517 *
518 * If no enough good blocks counted while @limit reached, false will be returned.
519 *
520 * If @count == 0, nearest good block address will be returned.
521 * @limit is not counted in walking.
522 */
523static bool nmbm_block_walk_desc(struct nmbm_instance *ni, uint32_t ba,
524 uint32_t *nba, uint32_t count, uint32_t limit)
525{
526 int32_t nblock = count;
527
528 if (limit >= ni->block_count)
529 limit = ni->block_count - 1;
530
531 while (ba > limit) {
532 if (nmbm_get_block_state(ni, ba) == BLOCK_ST_GOOD)
533 nblock--;
534
535 if (nblock < 0) {
536 *nba = ba;
537 return true;
538 }
539
540 ba--;
541 }
542
543 return false;
544}
545
/*
 * nmbm_block_walk - Skip specified number of good blocks from a block addr
 * @ni: NMBM instance structure
 * @ascending: whether to walk towards higher addresses
 * @ba: start physical block address
 * @nba: return physical block address after walk
 * @count: number of good blocks to be skipped
 * @limit: highest/lowest block address allowed for walking (not counted;
 *         may be negative for "no limit")
 *
 * Dispatches to the ascending or descending walker.
 * If @count == 0, the nearest good block address is returned.
 */
static bool nmbm_block_walk(struct nmbm_instance *ni, bool ascending,
			    uint32_t ba, uint32_t *nba, int32_t count,
			    int32_t limit)
{
	if (!ascending)
		return nmbm_block_walk_desc(ni, ba, nba, count, limit);

	return nmbm_block_walk_asc(ni, ba, nba, count, limit);
}
573
574/*
575 * nmbm_scan_badblocks - Scan and record all bad blocks
576 * @ni: NMBM instance structure
577 *
578 * Scan the entire lower NAND chip and record all bad blocks in to block state
579 * table.
580 */
581static void nmbm_scan_badblocks(struct nmbm_instance *ni)
582{
583 uint32_t ba;
584
585 for (ba = 0; ba < ni->block_count; ba++) {
586 if (nmbm_check_bad_phys_block(ni, ba)) {
587 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
588 nlog_info(ni, "Bad block %u [0x%08llx]\n", ba,
589 ba2addr(ni, ba));
590 }
591 }
592}
593
594/*
595 * nmbm_build_mapping_table - Build initial block mapping table
596 * @ni: NMBM instance structure
597 *
598 * The initial mapping table will be compatible with the stratage of
599 * factory production.
600 */
601static void nmbm_build_mapping_table(struct nmbm_instance *ni)
602{
603 uint32_t pb, lb;
604
605 for (pb = 0, lb = 0; pb < ni->mgmt_start_ba; pb++) {
606 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
607 continue;
608
609 /* Always map to the next good block */
610 ni->block_mapping[lb++] = pb;
611 }
612
613 ni->data_block_count = lb;
614
615 /* Unusable/Management blocks */
616 for (pb = lb; pb < ni->block_count; pb++)
617 ni->block_mapping[pb] = -1;
618}
619
620/*
621 * nmbm_erase_range - Erase a range of blocks
622 * @ni: NMBM instance structure
623 * @ba: block address where the erasure will start
624 * @limit: top block address allowed for erasure
625 *
626 * Erase blocks within the specific range. Newly-found bad blocks will be
627 * marked.
628 *
629 * @limit is not counted into the allowed erasure address.
630 */
631static void nmbm_erase_range(struct nmbm_instance *ni, uint32_t ba,
632 uint32_t limit)
633{
634 bool success;
635
636 while (ba < limit) {
637 WATCHDOG_RESET();
638
639 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
640 goto next_block;
641
642 success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
643 if (success)
644 goto next_block;
645
646 nmbm_mark_phys_bad_block(ni, ba);
647 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
648
649 next_block:
650 ba++;
651 }
652}
653
654/*
655 * nmbm_write_repeated_data - Write critical data to a block with retry
656 * @ni: NMBM instance structure
657 * @ba: block address where the data will be written to
658 * @data: the data to be written
659 * @size: size of the data
660 *
661 * Write data to every page of the block. Success only if all pages within
662 * this block have been successfully written.
663 *
664 * Make sure data size is not bigger than one page.
665 *
666 * This function will write and verify every page for at most
667 * NMBM_TRY_COUNT times.
668 */
669static bool nmbm_write_repeated_data(struct nmbm_instance *ni, uint32_t ba,
670 const void *data, uint32_t size)
671{
672 uint64_t addr, off;
673 bool success;
674 int ret;
675
676 if (size > ni->lower.writesize)
677 return false;
678
679 addr = ba2addr(ni, ba);
680
681 for (off = 0; off < ni->lower.erasesize; off += ni->lower.writesize) {
682 WATCHDOG_RESET();
683
684 /* Prepare page data. fill 0xff to unused region */
685 memcpy(ni->page_cache, data, size);
686 memset(ni->page_cache + size, 0xff, ni->rawpage_size - size);
687
688 success = nmbm_write_phys_page(ni, addr + off, ni->page_cache,
689 NULL, NMBM_MODE_PLACE_OOB);
690 if (!success)
691 return false;
692
693 /* Verify the data just written. ECC error indicates failure */
694 ret = nmbm_read_phys_page(ni, addr + off, ni->page_cache, NULL,
695 NMBM_MODE_PLACE_OOB);
696 if (ret)
697 return false;
698
699 if (memcmp(ni->page_cache, data, size))
700 return false;
701 }
702
703 return true;
704}
705
706/*
707 * nmbm_write_signature - Write signature to NAND chip
708 * @ni: NMBM instance structure
709 * @limit: top block address allowed for writing
710 * @signature: the signature to be written
711 * @signature_ba: the actual block address where signature is written to
712 *
713 * Write signature within a specific range, from chip bottom to limit.
714 * At most one block will be written.
715 *
716 * @limit is not counted into the allowed write address.
717 */
718static bool nmbm_write_signature(struct nmbm_instance *ni, uint32_t limit,
719 const struct nmbm_signature *signature,
720 uint32_t *signature_ba)
721{
722 uint32_t ba = ni->block_count - 1;
723 bool success;
724
725 while (ba > limit) {
726 WATCHDOG_RESET();
727
728 if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
729 goto next_block;
730
731 success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
732 if (!success)
733 goto skip_bad_block;
734
735 success = nmbm_write_repeated_data(ni, ba, signature,
736 sizeof(*signature));
737 if (success) {
738 *signature_ba = ba;
739 return true;
740 }
741
742 skip_bad_block:
743 nmbm_mark_phys_bad_block(ni, ba);
744 nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
745
746 next_block:
747 ba--;
748 };
749
750 return false;
751}
752
753/*
754 * nmbn_read_data - Read data
755 * @ni: NMBM instance structure
756 * @addr: linear address where the data will be read from
757 * @data: the data to be read
758 * @size: the size of data
759 *
760 * Read data range.
761 * Every page will be tried for at most NMBM_TRY_COUNT times.
762 *
763 * Return 0 for success, positive value for ecc error,
764 * negative value for other errors
765 */
766static int nmbn_read_data(struct nmbm_instance *ni, uint64_t addr, void *data,
767 uint32_t size)
768{
769 uint64_t off = addr;
770 uint8_t *ptr = data;
771 uint32_t sizeremain = size, chunksize, leading;
772 int ret;
773
774 while (sizeremain) {
775 WATCHDOG_RESET();
776
777 leading = off & ni->writesize_mask;
778 chunksize = ni->lower.writesize - leading;
779 if (chunksize > sizeremain)
780 chunksize = sizeremain;
781
782 if (chunksize == ni->lower.writesize) {
783 ret = nmbm_read_phys_page(ni, off - leading, ptr, NULL,
784 NMBM_MODE_PLACE_OOB);
785 if (ret)
786 return ret;
787 } else {
788 ret = nmbm_read_phys_page(ni, off - leading,
789 ni->page_cache, NULL,
790 NMBM_MODE_PLACE_OOB);
791 if (ret)
792 return ret;
793
794 memcpy(ptr, ni->page_cache + leading, chunksize);
795 }
796
797 off += chunksize;
798 ptr += chunksize;
799 sizeremain -= chunksize;
800 }
801
802 return 0;
803}
804
805/*
806 * nmbn_write_verify_data - Write data with validation
807 * @ni: NMBM instance structure
808 * @addr: linear address where the data will be written to
809 * @data: the data to be written
810 * @size: the size of data
811 *
812 * Write data and verify.
813 * Every page will be tried for at most NMBM_TRY_COUNT times.
814 */
815static bool nmbn_write_verify_data(struct nmbm_instance *ni, uint64_t addr,
816 const void *data, uint32_t size)
817{
818 uint64_t off = addr;
819 const uint8_t *ptr = data;
820 uint32_t sizeremain = size, chunksize, leading;
821 bool success;
822 int ret;
823
824 while (sizeremain) {
825 WATCHDOG_RESET();
826
827 leading = off & ni->writesize_mask;
828 chunksize = ni->lower.writesize - leading;
829 if (chunksize > sizeremain)
830 chunksize = sizeremain;
831
832 /* Prepare page data. fill 0xff to unused region */
833 memset(ni->page_cache, 0xff, ni->rawpage_size);
834 memcpy(ni->page_cache + leading, ptr, chunksize);
835
836 success = nmbm_write_phys_page(ni, off - leading,
837 ni->page_cache, NULL,
838 NMBM_MODE_PLACE_OOB);
839 if (!success)
840 return false;
841
842 /* Verify the data just written. ECC error indicates failure */
843 ret = nmbm_read_phys_page(ni, off - leading, ni->page_cache,
844 NULL, NMBM_MODE_PLACE_OOB);
845 if (ret)
846 return false;
847
848 if (memcmp(ni->page_cache + leading, ptr, chunksize))
849 return false;
850
851 off += chunksize;
852 ptr += chunksize;
853 sizeremain -= chunksize;
854 }
855
856 return true;
857}
858
/*
 * nmbm_write_mgmt_range - Write management data into NAND within a range
 * @ni: NMBM instance structure
 * @ba: preferred start block address for writing
 * @limit: highest block address allowed for writing (not counted)
 * @data: the data to be written
 * @size: the size of data
 * @actual_start_ba: actual start block address of data
 * @actual_end_ba: block address after the end of data
 *
 * Writes @data one erase block at a time, skipping known-bad blocks and
 * marking newly-failing blocks bad. Returns false if the range is
 * exhausted before all data is written; in that case @actual_end_ba is
 * not set (and @actual_start_ba is set only if at least one chunk was
 * written).
 */
static bool nmbm_write_mgmt_range(struct nmbm_instance *ni, uint32_t ba,
				  uint32_t limit, const void *data,
				  uint32_t size, uint32_t *actual_start_ba,
				  uint32_t *actual_end_ba)
{
	const uint8_t *ptr = data;
	uint32_t sizeremain = size, chunksize;
	bool success;

	while (sizeremain && ba < limit) {
		WATCHDOG_RESET();

		/* Write at most one erase block per iteration */
		chunksize = sizeremain;
		if (chunksize > ni->lower.erasesize)
			chunksize = ni->lower.erasesize;

		/* Skip blocks already known to be bad */
		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
			goto next_block;

		success = nmbm_erase_phys_block(ni, ba2addr(ni, ba));
		if (!success)
			goto skip_bad_block;

		success = nmbn_write_verify_data(ni, ba2addr(ni, ba), ptr,
						 chunksize);
		if (!success)
			goto skip_bad_block;

		/* First successfully-written chunk marks the actual start */
		if (sizeremain == size)
			*actual_start_ba = ba;

		ptr += chunksize;
		sizeremain -= chunksize;

		goto next_block;

	skip_bad_block:
		/* Erase/write failure: mark the block bad, then move on */
		nmbm_mark_phys_bad_block(ni, ba);
		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);

	next_block:
		ba++;
	}

	if (sizeremain)
		return false;

	*actual_end_ba = ba;

	return true;
}
922
923/*
924 * nmbm_generate_info_table_cache - Generate info table cache data
925 * @ni: NMBM instance structure
926 *
927 * Generate info table cache data to be written into flash.
928 */
929static bool nmbm_generate_info_table_cache(struct nmbm_instance *ni)
930{
931 bool changed = false;
932
933 memset(ni->info_table_cache, 0xff, ni->info_table_size);
934
935 memcpy(ni->info_table_cache + ni->info_table.state_table_off,
936 ni->block_state, ni->state_table_size);
937
938 memcpy(ni->info_table_cache + ni->info_table.mapping_table_off,
939 ni->block_mapping, ni->mapping_table_size);
940
941 ni->info_table.header.magic = NMBM_MAGIC_INFO_TABLE;
942 ni->info_table.header.version = NMBM_VER;
943 ni->info_table.header.size = ni->info_table_size;
944
945 if (ni->block_state_changed || ni->block_mapping_changed) {
946 ni->info_table.write_count++;
947 changed = true;
948 }
949
950 memcpy(ni->info_table_cache, &ni->info_table, sizeof(ni->info_table));
951
952 nmbm_update_checksum((struct nmbm_header *)ni->info_table_cache);
953
954 return changed;
955}
956
957/*
958 * nmbm_write_info_table - Write info table into NAND within a range
959 * @ni: NMBM instance structure
960 * @ba: preferred start block address for writing
961 * @limit: highest block address allowed for writing
962 * @actual_start_ba: actual start block address of info table
963 * @actual_end_ba: block address after the end of info table
964 *
965 * @limit is counted into the allowed write address.
966 */
967static bool nmbm_write_info_table(struct nmbm_instance *ni, uint32_t ba,
968 uint32_t limit, uint32_t *actual_start_ba,
969 uint32_t *actual_end_ba)
970{
971 return nmbm_write_mgmt_range(ni, ba, limit, ni->info_table_cache,
972 ni->info_table_size, actual_start_ba,
973 actual_end_ba);
974}
975
976/*
977 * nmbm_mark_tables_clean - Mark info table `clean'
978 * @ni: NMBM instance structure
979 */
980static void nmbm_mark_tables_clean(struct nmbm_instance *ni)
981{
982 ni->block_state_changed = 0;
983 ni->block_mapping_changed = 0;
984}
985
/*
 * nmbm_try_reserve_blocks - Reserve blocks, compromising when necessary
 * @ni: NMBM instance structure
 * @ba: start physical block address
 * @nba: return physical block address after reservation
 * @count: number of good blocks to be skipped
 * @min_count: minimum acceptable number of good blocks to be skipped
 * @limit: highest/lowest block address allowed for walking
 *
 * Tries to reserve @count good blocks; on failure, retries with
 * progressively fewer blocks down to @min_count.
 */
static bool nmbm_try_reserve_blocks(struct nmbm_instance *ni, uint32_t ba,
				    uint32_t *nba, uint32_t count,
				    int32_t min_count, int32_t limit)
{
	int32_t want;

	for (want = count; want >= min_count; want--) {
		if (nmbm_block_walk(ni, true, ba, nba, want, limit))
			return true;
	}

	return false;
}
1014
/*
 * nmbm_rebuild_info_table - Build main & backup info table from scratch
 * @ni: NMBM instance structure
 *
 * Writes the main info table first; if that fails the instance is put
 * into protected mode and false is returned. A backup table is then
 * written after a spare-block gap when space permits; lack of room for
 * the backup is not an error.
 */
static bool nmbm_rebuild_info_table(struct nmbm_instance *ni)
{
	uint32_t table_start_ba, table_end_ba, next_start_ba;
	uint32_t main_table_end_ba;
	bool success;

	/* Set initial value */
	ni->main_table_ba = 0;
	ni->backup_table_ba = 0;
	ni->mapping_blocks_ba = ni->mapping_blocks_top_ba;

	/* Write main table */
	success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
					ni->mapping_blocks_top_ba,
					&table_start_ba, &table_end_ba);
	if (!success) {
		/* Failed to write main table, data will be lost */
		nlog_emerg(ni, "Unable to write at least one info table!\n");
		nlog_emerg(ni, "Please save your data before power off!\n");
		ni->protected = 1;
		return false;
	}

	/* Main info table is successfully written, record its offset */
	ni->main_table_ba = table_start_ba;
	main_table_end_ba = table_end_ba;

	/* Adjust mapping_blocks_ba */
	ni->mapping_blocks_ba = table_end_ba;

	nmbm_mark_tables_clean(ni);

	nlog_table_creation(ni, true, table_start_ba, table_end_ba);

	/* Reserve spare blocks for main info table. */
	success = nmbm_try_reserve_blocks(ni, table_end_ba,
					  &next_start_ba,
					  ni->info_table_spare_blocks, 0,
					  ni->mapping_blocks_top_ba -
					  size2blk(ni, ni->info_table_size));
	if (!success) {
		/* There is no spare block. Main table only; not an error. */
		nlog_debug(ni, "No room for backup info table\n");
		return true;
	}

	/* Write backup info table. */
	success = nmbm_write_info_table(ni, next_start_ba,
					ni->mapping_blocks_top_ba,
					&table_start_ba, &table_end_ba);
	if (!success) {
		/* There is no enough blocks for backup table. */
		nlog_debug(ni, "No room for backup info table\n");
		return true;
	}

	/* Backup table is successfully written, record its offset */
	ni->backup_table_ba = table_start_ba;

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = table_end_ba;

	/* Erase spare blocks of main table to clean possible interference data */
	nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);

	nlog_table_creation(ni, false, table_start_ba, table_end_ba);

	return true;
}
1089
1090/*
1091 * nmbm_rescue_single_info_table - Rescue when there is only one info table
1092 * @ni: NMBM instance structure
1093 *
1094 * This function is called when there is only one info table exists.
1095 * This function may fail if we can't write new info table
1096 */
1097static bool nmbm_rescue_single_info_table(struct nmbm_instance *ni)
1098{
1099 uint32_t table_start_ba, table_end_ba, write_ba;
1100 bool success;
1101
1102 /* Try to write new info table in front of existing table */
1103 success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
1104 ni->main_table_ba,
1105 &table_start_ba,
1106 &table_end_ba);
1107 if (success) {
1108 /*
1109 * New table becomes the main table, existing table becomes
1110 * the backup table.
1111 */
1112 ni->backup_table_ba = ni->main_table_ba;
1113 ni->main_table_ba = table_start_ba;
1114
1115 nmbm_mark_tables_clean(ni);
1116
1117 /* Erase spare blocks of main table to clean possible interference data */
1118 nmbm_erase_range(ni, table_end_ba, ni->backup_table_ba);
1119
1120 nlog_table_creation(ni, true, table_start_ba, table_end_ba);
1121
1122 return true;
1123 }
1124
1125 /* Try to reserve spare blocks for existing table */
1126 success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
1127 ni->info_table_spare_blocks, 0,
1128 ni->mapping_blocks_top_ba -
1129 size2blk(ni, ni->info_table_size));
1130 if (!success) {
1131 nlog_warn(ni, "Failed to rescue single info table\n");
1132 return false;
1133 }
1134
1135 /* Try to write new info table next to the existing table */
1136 while (write_ba >= ni->mapping_blocks_ba) {
1137 WATCHDOG_RESET();
1138
1139 success = nmbm_write_info_table(ni, write_ba,
1140 ni->mapping_blocks_top_ba,
1141 &table_start_ba,
1142 &table_end_ba);
1143 if (success)
1144 break;
1145
1146 write_ba--;
1147 }
1148
1149 if (success) {
1150 /* Erase spare blocks of main table to clean possible interference data */
1151 nmbm_erase_range(ni, ni->mapping_blocks_ba, table_start_ba);
1152
1153 /* New table becomes the backup table */
1154 ni->backup_table_ba = table_start_ba;
1155 ni->mapping_blocks_ba = table_end_ba;
1156
1157 nmbm_mark_tables_clean(ni);
1158
1159 nlog_table_creation(ni, false, table_start_ba, table_end_ba);
1160
1161 return true;
1162 }
1163
1164 nlog_warn(ni, "Failed to rescue single info table\n");
1165 return false;
1166}
1167
1168/*
 * nmbm_update_single_info_table - Update one specific info table
 * @ni: NMBM instance structure
 * @update_main_table: true to update the main table, false for the backup table
1171 */
static bool nmbm_update_single_info_table(struct nmbm_instance *ni,
					  bool update_main_table)
{
	uint32_t write_start_ba, write_limit, table_start_ba, table_end_ba;
	bool success;

	/*
	 * Determine the write range: the main table may grow up to the
	 * backup table, the backup table up to the top of the remap pool.
	 */
	if (update_main_table) {
		write_start_ba = ni->main_table_ba;
		write_limit = ni->backup_table_ba;
	} else {
		write_start_ba = ni->backup_table_ba;
		write_limit = ni->mapping_blocks_top_ba;
	}

	nmbm_mark_block_color_mgmt(ni, write_start_ba, write_limit - 1);

	success = nmbm_write_info_table(ni, write_start_ba, write_limit,
					&table_start_ba, &table_end_ba);
	if (success) {
		/* Record where the table actually landed */
		if (update_main_table) {
			ni->main_table_ba = table_start_ba;
		} else {
			ni->backup_table_ba = table_start_ba;
			ni->mapping_blocks_ba = table_end_ba;
		}

		nmbm_mark_tables_clean(ni);

		nlog_table_update(ni, update_main_table, table_start_ba,
				  table_end_ba);

		return true;
	}

	if (update_main_table) {
		/*
		 * If failed to update main table, make backup table the new
		 * main table, and call nmbm_rescue_single_info_table()
		 */
		/*
		 * NOTE(review): update_main_table is always true in this
		 * branch, so this message always prints "Main".
		 */
		nlog_warn(ni, "Unable to update %s info table\n",
			  update_main_table ? "Main" : "Backup");

		ni->main_table_ba = ni->backup_table_ba;
		ni->backup_table_ba = 0;
		return nmbm_rescue_single_info_table(ni);
	}

	/* Only one table left */
	ni->mapping_blocks_ba = ni->backup_table_ba;
	ni->backup_table_ba = 0;

	return false;
}
1226
1227/*
1228 * nmbm_rescue_main_info_table - Rescue when failed to write main info table
1229 * @ni: NMBM instance structure
1230 *
1231 * This function is called when main info table failed to be written, and
1232 * backup info table exists.
1233 */
static bool nmbm_rescue_main_info_table(struct nmbm_instance *ni)
{
	uint32_t tmp_table_start_ba, tmp_table_end_ba, main_table_start_ba;
	uint32_t main_table_end_ba, write_ba;
	uint32_t info_table_erasesize = size2blk(ni, ni->info_table_size);
	bool success;

	/*
	 * Note: every failure path below still returns true, degrading the
	 * device to a single-table configuration instead of failing hard.
	 */

	/* Try to reserve spare blocks for existing backup info table */
	success = nmbm_try_reserve_blocks(ni, ni->mapping_blocks_ba, &write_ba,
					  ni->info_table_spare_blocks, 0,
					  ni->mapping_blocks_top_ba -
					  info_table_erasesize);
	if (!success) {
		/* There is no spare block. Backup info table becomes the main table. */
		nlog_err(ni, "No room for temporary info table\n");
		ni->main_table_ba = ni->backup_table_ba;
		ni->backup_table_ba = 0;
		return true;
	}

	/*
	 * Try to write temporary info table into spare unmapped blocks,
	 * retreating the start block until a write succeeds.
	 */
	while (write_ba >= ni->mapping_blocks_ba) {
		WATCHDOG_RESET();

		success = nmbm_write_info_table(ni, write_ba,
						ni->mapping_blocks_top_ba,
						&tmp_table_start_ba,
						&tmp_table_end_ba);
		if (success)
			break;

		write_ba--;
	}

	if (!success) {
		/* Backup info table becomes the main table */
		nlog_err(ni, "Failed to update main info table\n");
		ni->main_table_ba = ni->backup_table_ba;
		ni->backup_table_ba = 0;
		return true;
	}

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = tmp_table_end_ba;

	nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
				   tmp_table_end_ba - 1);

	/*
	 * Now write main info table at the beginning of management area.
	 * This operation will generally destroy the original backup info
	 * table.
	 */
	success = nmbm_write_info_table(ni, ni->mgmt_start_ba,
					tmp_table_start_ba,
					&main_table_start_ba,
					&main_table_end_ba);
	if (!success) {
		/* Temporary info table becomes the main table */
		ni->main_table_ba = tmp_table_start_ba;
		ni->backup_table_ba = 0;

		nmbm_mark_tables_clean(ni);

		nlog_err(ni, "Failed to update main info table\n");
		nmbm_mark_block_color_info_table(ni, tmp_table_start_ba,
						 tmp_table_end_ba - 1);

		return true;
	}

	/* Main info table has been successfully written, record its offset */
	ni->main_table_ba = main_table_start_ba;

	nmbm_mark_tables_clean(ni);

	nlog_table_creation(ni, true, main_table_start_ba, main_table_end_ba);

	/*
	 * Temporary info table becomes the new backup info table if it's
	 * not overwritten.
	 */
	if (main_table_end_ba <= tmp_table_start_ba) {
		ni->backup_table_ba = tmp_table_start_ba;

		nlog_table_creation(ni, false, tmp_table_start_ba,
				    tmp_table_end_ba);

		return true;
	}

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = main_table_end_ba;

	/* Try to reserve spare blocks for new main info table */
	success = nmbm_try_reserve_blocks(ni, main_table_end_ba, &write_ba,
					  ni->info_table_spare_blocks, 0,
					  ni->mapping_blocks_top_ba -
					  info_table_erasesize);
	if (!success) {
		/* There is no spare block. Only main table exists. */
		nlog_err(ni, "No room for backup info table\n");
		ni->backup_table_ba = 0;
		return true;
	}

	/* Write new backup info table. */
	while (write_ba >= main_table_end_ba) {
		WATCHDOG_RESET();

		success = nmbm_write_info_table(ni, write_ba,
						ni->mapping_blocks_top_ba,
						&tmp_table_start_ba,
						&tmp_table_end_ba);
		if (success)
			break;

		write_ba--;
	}

	if (!success) {
		nlog_err(ni, "No room for backup info table\n");
		ni->backup_table_ba = 0;
		return true;
	}

	/* Backup info table has been successfully written, record its offset */
	ni->backup_table_ba = tmp_table_start_ba;

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = tmp_table_end_ba;

	/* Erase spare blocks of main table to clean possible interference data */
	nmbm_erase_range(ni, main_table_end_ba, ni->backup_table_ba);

	nlog_table_creation(ni, false, tmp_table_start_ba, tmp_table_end_ba);

	return true;
}
1373
1374/*
1375 * nmbm_update_info_table_once - Update info table once
1376 * @ni: NMBM instance structure
1377 * @force: force update
1378 *
1379 * Update both main and backup info table. Return true if at least one info
1380 * table has been successfully written.
 * This function only tries to update the info table once regardless of the result.
1382 */
static bool nmbm_update_info_table_once(struct nmbm_instance *ni, bool force)
{
	uint32_t table_start_ba, table_end_ba;
	uint32_t main_table_limit;
	bool success;

	/* Do nothing if there is no change */
	if (!nmbm_generate_info_table_cache(ni) && !force)
		return true;

	/* Check whether both two tables exist */
	if (!ni->backup_table_ba) {
		main_table_limit = ni->mapping_blocks_top_ba;
		goto write_main_table;
	}

	nmbm_mark_block_color_mgmt(ni, ni->backup_table_ba,
				   ni->mapping_blocks_ba - 1);

	/*
	 * Write backup info table in its current range.
	 * Note that limit is set to mapping_blocks_top_off to provide as many
	 * spare blocks as possible for the backup table. If at last
	 * unmapped blocks are used by backup table, mapping_blocks_off will
	 * be adjusted.
	 */
	success = nmbm_write_info_table(ni, ni->backup_table_ba,
					ni->mapping_blocks_top_ba,
					&table_start_ba, &table_end_ba);
	if (!success) {
		/*
		 * There is nothing to do if failed to write backup table.
		 * Write the main table now.
		 */
		nlog_err(ni, "No room for backup table\n");
		ni->mapping_blocks_ba = ni->backup_table_ba;
		ni->backup_table_ba = 0;
		main_table_limit = ni->mapping_blocks_top_ba;
		goto write_main_table;
	}

	/* Backup table is successfully written, record its offset */
	ni->backup_table_ba = table_start_ba;

	/* Adjust mapping_blocks_off */
	ni->mapping_blocks_ba = table_end_ba;

	nmbm_mark_tables_clean(ni);

	/* The normal limit of main table */
	main_table_limit = ni->backup_table_ba;

	nlog_table_update(ni, false, table_start_ba, table_end_ba);

write_main_table:
	/* Rebuild everything if there is no main table to update */
	if (!ni->main_table_ba)
		goto rebuild_tables;

	if (!ni->backup_table_ba)
		nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
					   ni->mapping_blocks_ba - 1);
	else
		nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
					   ni->backup_table_ba - 1);

	/* Write main info table in its current range */
	success = nmbm_write_info_table(ni, ni->main_table_ba,
					main_table_limit, &table_start_ba,
					&table_end_ba);
	if (!success) {
		/* If failed to write main table, go rescue procedure */
		if (!ni->backup_table_ba)
			goto rebuild_tables;

		return nmbm_rescue_main_info_table(ni);
	}

	/* Main info table is successfully written, record its offset */
	ni->main_table_ba = table_start_ba;

	/* Adjust mapping_blocks_off */
	if (!ni->backup_table_ba)
		ni->mapping_blocks_ba = table_end_ba;

	nmbm_mark_tables_clean(ni);

	nlog_table_update(ni, true, table_start_ba, table_end_ba);

	return true;

rebuild_tables:
	return nmbm_rebuild_info_table(ni);
}
1476
1477/*
1478 * nmbm_update_info_table - Update info table
1479 * @ni: NMBM instance structure
1480 *
1481 * Update both main and backup info table. Return true if at least one table
1482 * has been successfully written.
1483 * This function will try to update info table repeatedly until no new bad
1484 * block found during updating.
1485 */
1486static bool nmbm_update_info_table(struct nmbm_instance *ni)
1487{
1488 bool success;
1489
1490 if (ni->protected)
1491 return true;
1492
1493 while (ni->block_state_changed || ni->block_mapping_changed) {
1494 success = nmbm_update_info_table_once(ni, false);
1495 if (!success) {
1496 nlog_err(ni, "Failed to update info table\n");
1497 return false;
1498 }
1499 }
1500
1501 return true;
1502}
1503
1504/*
 * nmbm_map_block - Map a bad block to an unused spare block
1506 * @ni: NMBM instance structure
1507 * @lb: logic block addr to map
1508 */
1509static bool nmbm_map_block(struct nmbm_instance *ni, uint32_t lb)
1510{
1511 uint32_t pb;
1512 bool success;
1513
1514 if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
1515 nlog_warn(ni, "No spare unmapped blocks.\n");
1516 return false;
1517 }
1518
1519 success = nmbm_block_walk(ni, false, ni->mapping_blocks_top_ba, &pb, 0,
1520 ni->mapping_blocks_ba);
1521 if (!success) {
1522 nlog_warn(ni, "No spare unmapped blocks.\n");
1523 nmbm_update_info_table(ni);
1524 ni->mapping_blocks_top_ba = ni->mapping_blocks_ba;
1525 return false;
1526 }
1527
1528 ni->block_mapping[lb] = pb;
1529 ni->mapping_blocks_top_ba--;
1530 ni->block_mapping_changed++;
1531
1532 nlog_info(ni, "Logic block %u mapped to physical blcok %u\n", lb, pb);
1533 nmbm_mark_block_color_mapped(ni, pb);
1534
1535 return true;
1536}
1537
1538/*
1539 * nmbm_create_info_table - Create info table(s)
1540 * @ni: NMBM instance structure
1541 *
1542 * This function assumes that the chip has no existing info table(s)
1543 */
static bool nmbm_create_info_table(struct nmbm_instance *ni)
{
	uint32_t lb;
	bool success;

	/*
	 * Set initial mapping_blocks_top_off: the first good block below
	 * the signature block becomes the top of the remap pool.
	 */
	success = nmbm_block_walk(ni, false, ni->signature_ba,
				  &ni->mapping_blocks_top_ba, 1,
				  ni->mgmt_start_ba);
	if (!success) {
		nlog_err(ni, "No room for spare blocks\n");
		return false;
	}

	/* Generate info table cache */
	nmbm_generate_info_table_cache(ni);

	/* Write info table */
	success = nmbm_rebuild_info_table(ni);
	if (!success) {
		nlog_err(ni, "Failed to build info tables\n");
		return false;
	}

	/*
	 * Remap bad block(s) at end of data area; each successful mapping
	 * reclaims one more logic block for the data area.
	 */
	for (lb = ni->data_block_count; lb < ni->mgmt_start_ba; lb++) {
		success = nmbm_map_block(ni, lb);
		if (!success)
			break;

		ni->data_block_count++;
	}

	/* If state table and/or mapping table changed, update info table. */
	success = nmbm_update_info_table(ni);
	if (!success)
		return false;

	return true;
}
1584
1585/*
1586 * nmbm_create_new - Create NMBM on a new chip
1587 * @ni: NMBM instance structure
1588 */
static bool nmbm_create_new(struct nmbm_instance *ni)
{
	bool success;

	/*
	 * Determine the boundary of management blocks: reserve
	 * max_ratio/NMBM_MGMT_DIV of the chip at the top for management.
	 */
	ni->mgmt_start_ba = ni->block_count * (NMBM_MGMT_DIV - ni->lower.max_ratio) / NMBM_MGMT_DIV;

	/* Cap the reserved region at max_reserved_blocks when configured */
	if (ni->lower.max_reserved_blocks && ni->block_count - ni->mgmt_start_ba > ni->lower.max_reserved_blocks)
		ni->mgmt_start_ba = ni->block_count - ni->lower.max_reserved_blocks;

	nlog_info(ni, "NMBM management region starts at block %u [0x%08llx]\n",
		  ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
	nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba, ni->block_count - 1);

	/* Fill block state table & mapping table */
	nmbm_scan_badblocks(ni);
	nmbm_build_mapping_table(ni);

	/* Write signature recording the chip geometry and management layout */
	ni->signature.header.magic = NMBM_MAGIC_SIGNATURE;
	ni->signature.header.version = NMBM_VER;
	ni->signature.header.size = sizeof(ni->signature);
	ni->signature.nand_size = ni->lower.size;
	ni->signature.block_size = ni->lower.erasesize;
	ni->signature.page_size = ni->lower.writesize;
	ni->signature.spare_size = ni->lower.oobsize;
	ni->signature.mgmt_start_pb = ni->mgmt_start_ba;
	ni->signature.max_try_count = NMBM_TRY_COUNT;
	nmbm_update_checksum(&ni->signature.header);

	success = nmbm_write_signature(ni, ni->mgmt_start_ba,
				       &ni->signature, &ni->signature_ba);
	if (!success) {
		nlog_err(ni, "Failed to write signature to a proper offset\n");
		return false;
	}

	nlog_info(ni, "Signature has been written to block %u [0x%08llx]\n",
		  ni->signature_ba, ba2addr(ni, ni->signature_ba));
	nmbm_mark_block_color_signature(ni, ni->signature_ba);

	/* Write info table(s) */
	success = nmbm_create_info_table(ni);
	if (success) {
		nlog_info(ni, "NMBM has been successfully created\n");
		return true;
	}

	return false;
}
1639
1640/*
1641 * nmbm_check_info_table_header - Check if a info table header is valid
1642 * @ni: NMBM instance structure
1643 * @data: pointer to the info table header
1644 */
1645static bool nmbm_check_info_table_header(struct nmbm_instance *ni, void *data)
1646{
1647 struct nmbm_info_table_header *ifthdr = data;
1648
1649 if (ifthdr->header.magic != NMBM_MAGIC_INFO_TABLE)
1650 return false;
1651
1652 if (ifthdr->header.size != ni->info_table_size)
1653 return false;
1654
1655 if (ifthdr->mapping_table_off - ifthdr->state_table_off < ni->state_table_size)
1656 return false;
1657
1658 if (ni->info_table_size - ifthdr->mapping_table_off < ni->mapping_table_size)
1659 return false;
1660
1661 return true;
1662}
1663
1664/*
1665 * nmbm_check_info_table - Check if a whole info table is valid
1666 * @ni: NMBM instance structure
1667 * @start_ba: start block address of this table
1668 * @end_ba: end block address of this table
1669 * @data: pointer to the info table header
1670 * @mapping_blocks_top_ba: return the block address of top remapped block
1671 */
static bool nmbm_check_info_table(struct nmbm_instance *ni, uint32_t start_ba,
				  uint32_t end_ba, void *data,
				  uint32_t *mapping_blocks_top_ba)
{
	struct nmbm_info_table_header *ifthdr = data;
	int32_t *block_mapping = (int32_t *)((uintptr_t)data + ifthdr->mapping_table_off);
	nmbm_bitmap_t *block_state = (nmbm_bitmap_t *)((uintptr_t)data + ifthdr->state_table_off);
	uint32_t minimum_mapping_pb = ni->signature_ba;
	uint32_t ba;

	/*
	 * A mapped physical block must not fall inside the management/table
	 * region [data_block_count, end_ba) nor be the signature block.
	 * NOTE(review): block_mapping entries are int32_t compared against
	 * uint32_t values; the comparisons promote to unsigned, so negative
	 * (unmapped) entries compare as very large values — presumably
	 * intentional, verify against nmbm_build_mapping_table().
	 */
	for (ba = 0; ba < ni->data_block_count; ba++) {
		if ((block_mapping[ba] >= ni->data_block_count && block_mapping[ba] < end_ba) ||
		    block_mapping[ba] == ni->signature_ba)
			return false;

		/* Track the lowest remapped block above the table region */
		if (block_mapping[ba] >= end_ba && block_mapping[ba] < minimum_mapping_pb)
			minimum_mapping_pb = block_mapping[ba];
	}

	/*
	 * The on-flash state table must not claim as good any block of its
	 * own range that we already know to be good-in-memory but differ.
	 */
	for (ba = start_ba; ba < end_ba; ba++) {
		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
			continue;

		if (nmbm_get_block_state_raw(block_state, ba) != BLOCK_ST_GOOD)
			return false;
	}

	/* The remap pool ends just below the lowest remapped block */
	*mapping_blocks_top_ba = minimum_mapping_pb - 1;

	return true;
}
1703
1704/*
1705 * nmbm_try_load_info_table - Try to load info table from a address
1706 * @ni: NMBM instance structure
1707 * @ba: start block address of the info table
1708 * @eba: return the block address after end of the table
1709 * @write_count: return the write count of this table
1710 * @mapping_blocks_top_ba: return the block address of top remapped block
1711 * @table_loaded: used to record whether ni->info_table has valid data
1712 */
static bool nmbm_try_load_info_table(struct nmbm_instance *ni, uint32_t ba,
				     uint32_t *eba, uint32_t *write_count,
				     uint32_t *mapping_blocks_top_ba,
				     bool table_loaded)
{
	struct nmbm_info_table_header *ifthdr = (void *)ni->info_table_cache;
	uint8_t *off = ni->info_table_cache;
	uint32_t limit = ba + size2blk(ni, ni->info_table_size);
	uint32_t start_ba = 0, chunksize, sizeremain = ni->info_table_size;
	bool success, checkhdr = true;
	int ret;

	/* Read one erase-block-sized chunk per good block until complete */
	while (sizeremain && ba < limit) {
		WATCHDOG_RESET();

		if (nmbm_get_block_state(ni, ba) != BLOCK_ST_GOOD)
			goto next_block;

		if (nmbm_check_bad_phys_block(ni, ba)) {
			nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);
			goto next_block;
		}

		chunksize = sizeremain;
		if (chunksize > ni->lower.erasesize)
			chunksize = ni->lower.erasesize;

		/* Assume block with ECC error has no info table data */
		ret = nmbn_read_data(ni, ba2addr(ni, ba), off, chunksize);
		if (ret < 0)
			goto skip_bad_block;
		else if (ret > 0)
			return false;

		/* Validate the header in the very first chunk read */
		if (checkhdr) {
			success = nmbm_check_info_table_header(ni, off);
			if (!success)
				return false;

			start_ba = ba;
			checkhdr = false;
		}

		off += chunksize;
		sizeremain -= chunksize;

		goto next_block;

	skip_bad_block:
		/* Only mark bad in memory */
		nmbm_set_block_state(ni, ba, BLOCK_ST_BAD);

	next_block:
		ba++;
	}

	/* Table is truncated if data remained after running out of blocks */
	if (sizeremain)
		return false;

	/* Verify the checksum over the whole cached table */
	success = nmbm_check_header(ni->info_table_cache, ni->info_table_size);
	if (!success)
		return false;

	*eba = ba;
	*write_count = ifthdr->write_count;

	success = nmbm_check_info_table(ni, start_ba, ba, ni->info_table_cache,
					mapping_blocks_top_ba);
	if (!success)
		return false;

	/* Adopt this table if it's the first one or strictly newer */
	if (!table_loaded || ifthdr->write_count > ni->info_table.write_count) {
		memcpy(&ni->info_table, ifthdr, sizeof(ni->info_table));
		memcpy(ni->block_state,
		       (uint8_t *)ifthdr + ifthdr->state_table_off,
		       ni->state_table_size);
		memcpy(ni->block_mapping,
		       (uint8_t *)ifthdr + ifthdr->mapping_table_off,
		       ni->mapping_table_size);
		ni->info_table.write_count = ifthdr->write_count;
	}

	return true;
}
1797
1798/*
1799 * nmbm_search_info_table - Search info table from specific address
1800 * @ni: NMBM instance structure
1801 * @ba: start block address to search
1802 * @limit: highest block address allowed for searching
1803 * @table_start_ba: return the start block address of this table
1804 * @table_end_ba: return the block address after end of this table
1805 * @write_count: return the write count of this table
1806 * @mapping_blocks_top_ba: return the block address of top remapped block
1807 * @table_loaded: used to record whether ni->info_table has valid data
1808 */
1809static bool nmbm_search_info_table(struct nmbm_instance *ni, uint32_t ba,
1810 uint32_t limit, uint32_t *table_start_ba,
1811 uint32_t *table_end_ba,
1812 uint32_t *write_count,
1813 uint32_t *mapping_blocks_top_ba,
1814 bool table_loaded)
1815{
1816 bool success;
1817
1818 while (ba < limit - size2blk(ni, ni->info_table_size)) {
1819 WATCHDOG_RESET();
1820
1821 success = nmbm_try_load_info_table(ni, ba, table_end_ba,
1822 write_count,
1823 mapping_blocks_top_ba,
1824 table_loaded);
1825 if (success) {
1826 *table_start_ba = ba;
1827 return true;
1828 }
1829
1830 ba++;
1831 }
1832
1833 return false;
1834}
1835
1836/*
1837 * nmbm_load_info_table - Load info table(s) from a chip
1838 * @ni: NMBM instance structure
1839 * @ba: start block address to search info table
1840 * @limit: highest block address allowed for searching
1841 */
static bool nmbm_load_info_table(struct nmbm_instance *ni, uint32_t ba,
				 uint32_t limit)
{
	uint32_t main_table_end_ba, backup_table_end_ba, table_end_ba;
	uint32_t main_mapping_blocks_top_ba, backup_mapping_blocks_top_ba;
	uint32_t main_table_write_count, backup_table_write_count;
	uint32_t i;
	bool success;

	/* Set initial value */
	ni->main_table_ba = 0;
	ni->backup_table_ba = 0;
	ni->info_table.write_count = 0;
	ni->mapping_blocks_top_ba = ni->signature_ba - 1;
	ni->data_block_count = ni->signature.mgmt_start_pb;

	/* Find first info table */
	success = nmbm_search_info_table(ni, ba, limit, &ni->main_table_ba,
					 &main_table_end_ba,
					 &main_table_write_count,
					 &main_mapping_blocks_top_ba, false);
	if (!success) {
		nlog_warn(ni, "No valid info table found\n");
		return false;
	}

	table_end_ba = main_table_end_ba;

	nlog_table_found(ni, true, main_table_write_count, ni->main_table_ba,
			 main_table_end_ba);

	/* Find second info table */
	success = nmbm_search_info_table(ni, main_table_end_ba, limit,
					 &ni->backup_table_ba,
					 &backup_table_end_ba,
					 &backup_table_write_count,
					 &backup_mapping_blocks_top_ba, true);
	if (!success) {
		nlog_warn(ni, "Second info table not found\n");
	} else {
		table_end_ba = backup_table_end_ba;

		nlog_table_found(ni, false, backup_table_write_count,
				 ni->backup_table_ba, backup_table_end_ba);
	}

	/* Pick mapping_blocks_top_ba from the table with the newer data */
	if (!ni->backup_table_ba) {
		ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
	} else {
		if (main_table_write_count >= backup_table_write_count)
			ni->mapping_blocks_top_ba = main_mapping_blocks_top_ba;
		else
			ni->mapping_blocks_top_ba = backup_mapping_blocks_top_ba;
	}

	/* Set final mapping_blocks_ba */
	ni->mapping_blocks_ba = table_end_ba;

	/*
	 * Set final data_block_count: the highest logic block with a valid
	 * (non-negative) mapping entry.
	 */
	for (i = ni->signature.mgmt_start_pb; i > 0; i--) {
		if (ni->block_mapping[i - 1] >= 0) {
			ni->data_block_count = i;
			break;
		}
	}

	/* Debug purpose: mark mapped blocks and bad blocks */
	for (i = 0; i < ni->data_block_count; i++) {
		if (ni->block_mapping[i] > ni->mapping_blocks_top_ba)
			nmbm_mark_block_color_mapped(ni, ni->block_mapping[i]);
	}

	for (i = 0; i < ni->block_count; i++) {
		if (nmbm_get_block_state(ni, i) == BLOCK_ST_BAD)
			nmbm_mark_block_color_bad(ni, i);
	}

	/* Regenerate the info table cache from the final selected info table */
	nmbm_generate_info_table_cache(ni);

	/*
	 * If only one table exists, try to write another table.
	 * If two tables have different write count, try to update info table
	 */
	if (!ni->backup_table_ba) {
		success = nmbm_rescue_single_info_table(ni);
	} else if (main_table_write_count != backup_table_write_count) {
		/* Mark state & mapping tables changed */
		ni->block_state_changed = 1;
		ni->block_mapping_changed = 1;

		/* Rewrite whichever table holds the older data */
		success = nmbm_update_single_info_table(ni,
			main_table_write_count < backup_table_write_count);
	} else {
		success = true;
	}

	/*
	 * If there is no spare unmapped blocks, or still only one table
	 * exists, set the chip to read-only
	 */
	if (ni->mapping_blocks_ba == ni->mapping_blocks_top_ba) {
		nlog_warn(ni, "No spare unmapped blocks. Device is now read-only\n");
		ni->protected = 1;
	} else if (!success) {
		nlog_warn(ni, "Only one info table found. Device is now read-only\n");
		ni->protected = 1;
	}

	return true;
}
1951
1952/*
 * nmbm_load_existing - Load NMBM from a chip carrying an existing signature
1954 * @ni: NMBM instance structure
1955 */
static bool nmbm_load_existing(struct nmbm_instance *ni)
{
	bool success;

	/* Calculate the boundary of management blocks from the signature */
	ni->mgmt_start_ba = ni->signature.mgmt_start_pb;

	nlog_debug(ni, "NMBM management region starts at block %u [0x%08llx]\n",
		   ni->mgmt_start_ba, ba2addr(ni, ni->mgmt_start_ba));
	nmbm_mark_block_color_mgmt(ni, ni->mgmt_start_ba,
				   ni->signature_ba - 1);

	/* Look for info table(s) */
	success = nmbm_load_info_table(ni, ni->mgmt_start_ba,
				       ni->signature_ba);
	if (success) {
		nlog_info(ni, "NMBM has been successfully attached\n");
		return true;
	}

	/* No usable tables; only recreate them when creation is allowed */
	if (!(ni->lower.flags & NMBM_F_CREATE))
		return false;

	/* Fill block state table & mapping table */
	nmbm_scan_badblocks(ni);
	nmbm_build_mapping_table(ni);

	/* Write info table(s) */
	success = nmbm_create_info_table(ni);
	if (success) {
		nlog_info(ni, "NMBM has been successfully created\n");
		return true;
	}

	return false;
}
1992
1993/*
1994 * nmbm_find_signature - Find signature in the lower NAND chip
1995 * @ni: NMBM instance structure
 * @signature: used for storing the signature data found
 * @signature_ba: return the actual block address of signature block
1998 *
1999 * Find a valid signature from a specific range in the lower NAND chip,
2000 * from bottom (highest address) to top (lowest address)
2001 *
2002 * Return true if found.
2003 */
2004static bool nmbm_find_signature(struct nmbm_instance *ni,
2005 struct nmbm_signature *signature,
2006 uint32_t *signature_ba)
2007{
2008 struct nmbm_signature sig;
2009 uint64_t off, addr;
2010 uint32_t block_count, ba, limit;
2011 bool success;
2012 int ret;
2013
2014 /* Calculate top and bottom block address */
2015 block_count = ni->lower.size >> ni->erasesize_shift;
2016 ba = block_count;
2017 limit = (block_count / NMBM_MGMT_DIV) * (NMBM_MGMT_DIV - ni->lower.max_ratio);
2018 if (ni->lower.max_reserved_blocks && block_count - limit > ni->lower.max_reserved_blocks)
2019 limit = block_count - ni->lower.max_reserved_blocks;
2020
2021 while (ba >= limit) {
2022 WATCHDOG_RESET();
2023
2024 ba--;
2025 addr = ba2addr(ni, ba);
2026
2027 if (nmbm_check_bad_phys_block(ni, ba))
2028 continue;
2029
2030 /* Check every page.
2031 * As long as at leaset one page contains valid signature,
2032 * the block is treated as a valid signature block.
2033 */
2034 for (off = 0; off < ni->lower.erasesize;
2035 off += ni->lower.writesize) {
2036 WATCHDOG_RESET();
2037
2038 ret = nmbn_read_data(ni, addr + off, &sig,
2039 sizeof(sig));
2040 if (ret)
2041 continue;
2042
2043 /* Check for header size and checksum */
2044 success = nmbm_check_header(&sig, sizeof(sig));
2045 if (!success)
2046 continue;
2047
2048 /* Check for header magic */
2049 if (sig.header.magic == NMBM_MAGIC_SIGNATURE) {
2050 /* Found it */
2051 memcpy(signature, &sig, sizeof(sig));
2052 *signature_ba = ba;
2053 return true;
2054 }
2055 }
2056 };
2057
2058 return false;
2059}
2060
2061/*
2062 * is_power_of_2_u64 - Check whether a 64-bit integer is power of 2
2063 * @n: number to check
2064 */
static bool is_power_of_2_u64(uint64_t n)
{
	/* A power of two is non-zero and has exactly one bit set */
	return n && !(n & (n - 1));
}
2069
2070/*
2071 * nmbm_check_lower_members - Validate the members of lower NAND device
2072 * @nld: Lower NAND chip structure
2073 */
static bool nmbm_check_lower_members(struct nmbm_lower_device *nld)
{

	/*
	 * Chip geometry must be power-of-two so the shift/mask arithmetic
	 * set up in nmbm_init_structure() is valid.
	 */
	if (!nld->size || !is_power_of_2_u64(nld->size)) {
		nmbm_log_lower(nld, NMBM_LOG_ERR,
			       "Chip size %llu is not valid\n", nld->size);
		return false;
	}

	if (!nld->erasesize || !is_power_of_2(nld->erasesize)) {
		nmbm_log_lower(nld, NMBM_LOG_ERR,
			       "Block size %u is not valid\n", nld->erasesize);
		return false;
	}

	if (!nld->writesize || !is_power_of_2(nld->writesize)) {
		nmbm_log_lower(nld, NMBM_LOG_ERR,
			       "Page size %u is not valid\n", nld->writesize);
		return false;
	}

	if (!nld->oobsize || !is_power_of_2(nld->oobsize)) {
		nmbm_log_lower(nld, NMBM_LOG_ERR,
			       "Page spare size %u is not valid\n", nld->oobsize);
		return false;
	}

	/* The low-level I/O callbacks are mandatory */
	if (!nld->read_page || !nld->write_page || !nld->erase_block) {
		nmbm_log_lower(nld, NMBM_LOG_ERR,
			       "read_page(), write_page() and erase_block() are required\n");
		return false;
	}

	/* Data sanity check */
	if (!nld->max_ratio)
		nld->max_ratio = 1;

	if (nld->max_ratio >= NMBM_MGMT_DIV - 1) {
		nmbm_log_lower(nld, NMBM_LOG_ERR,
			       "max ratio %u is invalid\n", nld->max_ratio);
		return false;
	}

	if (nld->max_reserved_blocks && nld->max_reserved_blocks < NMBM_MGMT_BLOCKS_MIN) {
		nmbm_log_lower(nld, NMBM_LOG_ERR,
			       "max reserved blocks %u is too small\n", nld->max_reserved_blocks);
		return false;
	}

	return true;
}
2125
2126/*
2127 * nmbm_calc_structure_size - Calculate the instance structure size
2128 * @nld: NMBM lower device structure
2129 */
2130size_t nmbm_calc_structure_size(struct nmbm_lower_device *nld)
2131{
2132 uint32_t state_table_size, mapping_table_size, info_table_size;
2133 uint32_t block_count;
2134
2135 block_count = nmbm_lldiv(nld->size, nld->erasesize);
2136
2137 /* Calculate info table size */
2138 state_table_size = ((block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
2139 NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
2140 mapping_table_size = block_count * sizeof(int32_t);
2141
2142 info_table_size = NMBM_ALIGN(sizeof(struct nmbm_info_table_header),
2143 nld->writesize);
2144 info_table_size += NMBM_ALIGN(state_table_size, nld->writesize);
2145 info_table_size += NMBM_ALIGN(mapping_table_size, nld->writesize);
2146
2147 return info_table_size + state_table_size + mapping_table_size +
2148 nld->writesize + nld->oobsize + sizeof(struct nmbm_instance);
2149}
2150
2151/*
2152 * nmbm_init_structure - Initialize members of instance structure
2153 * @ni: NMBM instance structure
2154 */
static void nmbm_init_structure(struct nmbm_instance *ni)
{
	uint32_t pages_per_block, blocks_per_chip;
	uintptr_t ptr;

	pages_per_block = ni->lower.erasesize / ni->lower.writesize;
	blocks_per_chip = nmbm_lldiv(ni->lower.size, ni->lower.erasesize);

	/* Raw sizes include the OOB (spare) area of each page */
	ni->rawpage_size = ni->lower.writesize + ni->lower.oobsize;
	ni->rawblock_size = pages_per_block * ni->rawpage_size;
	ni->rawchip_size = blocks_per_chip * ni->rawblock_size;

	/* Masks/shifts are valid because both sizes are powers of two */
	ni->writesize_mask = ni->lower.writesize - 1;
	ni->erasesize_mask = ni->lower.erasesize - 1;

	ni->writesize_shift = ffs(ni->lower.writesize) - 1;
	ni->erasesize_shift = ffs(ni->lower.erasesize) - 1;

	/* Calculate number of blocks in this chip */
	ni->block_count = ni->lower.size >> ni->erasesize_shift;

	/* Calculate info table size */
	ni->state_table_size = ((ni->block_count + NMBM_BITMAP_BLOCKS_PER_UNIT - 1) /
				NMBM_BITMAP_BLOCKS_PER_UNIT) * NMBM_BITMAP_UNIT_SIZE;
	ni->mapping_table_size = ni->block_count * sizeof(*ni->block_mapping);

	/*
	 * Build the on-flash info table layout incrementally: header,
	 * then state table, then mapping table, each page-aligned.
	 * The *_off fields record each table's offset inside the image.
	 */
	ni->info_table_size = NMBM_ALIGN(sizeof(ni->info_table),
					 ni->lower.writesize);
	ni->info_table.state_table_off = ni->info_table_size;

	ni->info_table_size += NMBM_ALIGN(ni->state_table_size,
					  ni->lower.writesize);
	ni->info_table.mapping_table_off = ni->info_table_size;

	ni->info_table_size += NMBM_ALIGN(ni->mapping_table_size,
					  ni->lower.writesize);

	ni->info_table_spare_blocks = nmbm_get_spare_block_count(
		size2blk(ni, ni->info_table_size));

	/*
	 * Assign memory to members. The caller allocated one contiguous
	 * region sized by nmbm_calc_structure_size(); carve it up in the
	 * same order that function accounted for.
	 */
	ptr = (uintptr_t)ni + sizeof(*ni);

	ni->info_table_cache = (void *)ptr;
	ptr += ni->info_table_size;

	ni->block_state = (void *)ptr;
	ptr += ni->state_table_size;

	ni->block_mapping = (void *)ptr;
	ptr += ni->mapping_table_size;

	ni->page_cache = (uint8_t *)ptr;

	/* Initialize block state table (0xff = erased/unknown state) */
	ni->block_state_changed = 0;
	memset(ni->block_state, 0xff, ni->state_table_size);

	/* Initialize block mapping table */
	ni->block_mapping_changed = 0;
}
2216
2217/*
2218 * nmbm_attach - Attach to a lower device
2219 * @nld: NMBM lower device structure
2220 * @ni: NMBM instance structure
2221 */
2222int nmbm_attach(struct nmbm_lower_device *nld, struct nmbm_instance *ni)
2223{
2224 bool success;
2225
2226 if (!nld || !ni)
2227 return -EINVAL;
2228
2229 /* Set default log level */
2230 ni->log_display_level = NMBM_DEFAULT_LOG_LEVEL;
2231
2232 /* Check lower members */
2233 success = nmbm_check_lower_members(nld);
2234 if (!success)
2235 return -EINVAL;
2236
2237 /* Initialize NMBM instance */
2238 memcpy(&ni->lower, nld, sizeof(struct nmbm_lower_device));
2239 nmbm_init_structure(ni);
2240
2241 success = nmbm_find_signature(ni, &ni->signature, &ni->signature_ba);
2242 if (!success) {
2243 if (!(nld->flags & NMBM_F_CREATE)) {
2244 nlog_err(ni, "Signature not found\n");
2245 return -ENODEV;
2246 }
2247
2248 success = nmbm_create_new(ni);
2249 if (!success)
2250 return -ENODEV;
2251
2252 return 0;
2253 }
2254
2255 nlog_info(ni, "Signature found at block %u [0x%08llx]\n",
2256 ni->signature_ba, ba2addr(ni, ni->signature_ba));
2257 nmbm_mark_block_color_signature(ni, ni->signature_ba);
2258
2259 if (ni->signature.header.version != NMBM_VER) {
2260 nlog_err(ni, "NMBM version %u.%u is not supported\n",
2261 NMBM_VERSION_MAJOR_GET(ni->signature.header.version),
2262 NMBM_VERSION_MINOR_GET(ni->signature.header.version));
2263 return -EINVAL;
2264 }
2265
2266 if (ni->signature.nand_size != nld->size ||
2267 ni->signature.block_size != nld->erasesize ||
2268 ni->signature.page_size != nld->writesize ||
2269 ni->signature.spare_size != nld->oobsize) {
2270 nlog_err(ni, "NMBM configuration mismatch\n");
2271 return -EINVAL;
2272 }
2273
2274 success = nmbm_load_existing(ni);
2275 if (!success)
2276 return -ENODEV;
2277
2278 return 0;
2279}
2280
2281/*
2282 * nmbm_detach - Detach from a lower device, and save all tables
2283 * @ni: NMBM instance structure
2284 */
2285int nmbm_detach(struct nmbm_instance *ni)
2286{
2287 if (!ni)
2288 return -EINVAL;
2289
2290 nmbm_update_info_table(ni);
2291
2292 nmbm_mark_block_color_normal(ni, 0, ni->block_count - 1);
2293
2294 return 0;
2295}
2296
/*
 * nmbm_erase_logic_block - Erase a logic block
 * @ni: NMBM instance structure
 * @block_addr: logic block address
 *
 * Logic block will be mapped to physical block before erasing.
 * Bad block found during erasing will be remapped to a good block if there is
 * still at least one good spare block available.
 */
static int nmbm_erase_logic_block(struct nmbm_instance *ni, uint32_t block_addr)
{
	uint32_t pb;
	bool success;

retry:
	/* Map logic block to physical block */
	pb = ni->block_mapping[block_addr];

	/* Whether the logic block is good (has valid mapping) */
	if ((int32_t)pb < 0) {
		nlog_debug(ni, "Logic block %u is a bad block\n", block_addr);
		return -EIO;
	}

	/* Remap logic block if current physical block is a bad block */
	if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD ||
	    nmbm_get_block_state(ni, pb) == BLOCK_ST_NEED_REMAP)
		goto remap_logic_block;

	/* Erase the mapped physical block; success ends the retry loop */
	success = nmbm_erase_phys_block(ni, ba2addr(ni, pb));
	if (success)
		return 0;

	/* Mark bad block: erase failed on this physical block */
	nmbm_mark_phys_bad_block(ni, pb);
	nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);

remap_logic_block:
	/* Try to assign a new block */
	success = nmbm_map_block(ni, block_addr);
	if (!success) {
		/* Mark logic block unusable, and update info table */
		ni->block_mapping[block_addr] = -1;
		/* Keep NEED_REMAP state as-is; otherwise record as bad */
		if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
			nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
		nmbm_update_info_table(ni);
		return -EIO;
	}

	/* Update info table before erasing (pb is the OLD physical block) */
	if (nmbm_get_block_state(ni, pb) != BLOCK_ST_NEED_REMAP)
		nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
	nmbm_update_info_table(ni);

	/* Remap succeeded: retry the erase on the newly mapped block */
	goto retry;
}
2353
2354/*
2355 * nmbm_erase_block_range - Erase logic blocks
2356 * @ni: NMBM instance structure
2357 * @addr: logic linear address
2358 * @size: erase range
2359 * @failed_addr: return failed block address if error occurs
2360 */
2361int nmbm_erase_block_range(struct nmbm_instance *ni, uint64_t addr,
2362 uint64_t size, uint64_t *failed_addr)
2363{
2364 uint32_t start_ba, end_ba;
2365 int ret;
2366
2367 if (!ni)
2368 return -EINVAL;
2369
2370 /* Sanity check */
2371 if (ni->protected) {
2372 nlog_debug(ni, "Device is forced read-only\n");
2373 return -EROFS;
2374 }
2375
2376 if (addr >= ba2addr(ni, ni->data_block_count)) {
2377 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2378 return -EINVAL;
2379 }
2380
2381 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2382 nlog_err(ni, "Erase range 0xllxu is too large\n", size);
2383 return -EINVAL;
2384 }
2385
2386 if (!size) {
2387 nlog_warn(ni, "No blocks to be erased\n");
2388 return 0;
2389 }
2390
2391 start_ba = addr2ba(ni, addr);
2392 end_ba = addr2ba(ni, addr + size - 1);
2393
2394 while (start_ba <= end_ba) {
2395 WATCHDOG_RESET();
2396
2397 ret = nmbm_erase_logic_block(ni, start_ba);
2398 if (ret) {
2399 if (failed_addr)
2400 *failed_addr = ba2addr(ni, start_ba);
2401 return ret;
2402 }
2403
2404 start_ba++;
2405 }
2406
2407 return 0;
2408}
2409
2410/*
2411 * nmbm_read_logic_page - Read page based on logic address
2412 * @ni: NMBM instance structure
2413 * @addr: logic linear address
2414 * @data: buffer to store main data. optional.
2415 * @oob: buffer to store oob data. optional.
2416 * @mode: read mode
2417 */
2418static int nmbm_read_logic_page(struct nmbm_instance *ni, uint64_t addr,
2419 void *data, void *oob, enum nmbm_oob_mode mode)
2420{
2421 uint32_t lb, pb, offset;
2422 uint64_t paddr;
2423 int ret;
2424
2425 /* Extract block address and in-block offset */
2426 lb = addr2ba(ni, addr);
2427 offset = addr & ni->erasesize_mask;
2428
2429 /* Map logic block to physical block */
2430 pb = ni->block_mapping[lb];
2431
2432 /* Whether the logic block is good (has valid mapping) */
2433 if ((int32_t)pb < 0) {
2434 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2435 return -EIO;
2436 }
2437
2438 /* Fail if physical block is marked bad */
2439 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2440 return -EIO;
2441
2442 /* Assemble new address */
2443 paddr = ba2addr(ni, pb) + offset;
2444
2445 ret = nmbm_read_phys_page(ni, paddr, data, oob, mode);
2446 if (!ret)
2447 return 0;
2448
2449 /* For ECC error, return positive value only */
2450 if (ret > 0)
2451 return 1;
2452
2453 /*
2454 * Do not remap bad block here. Just mark this block in state table.
2455 * Remap this block on erasing.
2456 */
2457 nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2458 nmbm_update_info_table(ni);
2459
2460 return -EIO;
2461}
2462
2463/*
2464 * nmbm_read_single_page - Read one page based on logic address
2465 * @ni: NMBM instance structure
2466 * @addr: logic linear address
2467 * @data: buffer to store main data. optional.
2468 * @oob: buffer to store oob data. optional.
2469 * @mode: read mode
2470 */
2471int nmbm_read_single_page(struct nmbm_instance *ni, uint64_t addr, void *data,
2472 void *oob, enum nmbm_oob_mode mode)
2473{
2474 if (!ni)
2475 return -EINVAL;
2476
2477 /* Sanity check */
2478 if (ni->protected) {
2479 nlog_debug(ni, "Device is forced read-only\n");
2480 return -EROFS;
2481 }
2482
2483 if (addr >= ba2addr(ni, ni->data_block_count)) {
2484 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2485 return -EINVAL;
2486 }
2487
2488 return nmbm_read_logic_page(ni, addr, data, oob, mode);
2489}
2490
2491/*
2492 * nmbm_read_range - Read data without oob
2493 * @ni: NMBM instance structure
2494 * @addr: logic linear address
2495 * @size: data size to read
2496 * @data: buffer to store main data to be read
2497 * @mode: read mode
2498 * @retlen: return actual data size read
2499 */
2500int nmbm_read_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2501 void *data, enum nmbm_oob_mode mode, size_t *retlen)
2502{
2503 uint64_t off = addr;
2504 uint8_t *ptr = data;
2505 size_t sizeremain = size, chunksize, leading;
2506 int ret;
2507
2508 if (!ni)
2509 return -EINVAL;
2510
2511 /* Sanity check */
2512 if (ni->protected) {
2513 nlog_debug(ni, "Device is forced read-only\n");
2514 return -EROFS;
2515 }
2516
2517 if (addr >= ba2addr(ni, ni->data_block_count)) {
2518 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2519 return -EINVAL;
2520 }
2521
2522 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2523 nlog_err(ni, "Read range 0x%llx is too large\n", size);
2524 return -EINVAL;
2525 }
2526
2527 if (!size) {
2528 nlog_warn(ni, "No data to be read\n");
2529 return 0;
2530 }
2531
2532 while (sizeremain) {
2533 WATCHDOG_RESET();
2534
2535 leading = off & ni->writesize_mask;
2536 chunksize = ni->lower.writesize - leading;
2537 if (chunksize > sizeremain)
2538 chunksize = sizeremain;
2539
2540 if (chunksize == ni->lower.writesize) {
2541 ret = nmbm_read_logic_page(ni, off - leading, ptr,
2542 NULL, mode);
2543 if (ret)
2544 break;
2545 } else {
2546 ret = nmbm_read_logic_page(ni, off - leading,
2547 ni->page_cache, NULL,
2548 mode);
2549 if (ret)
2550 break;
2551
2552 memcpy(ptr, ni->page_cache + leading, chunksize);
2553 }
2554
2555 off += chunksize;
2556 ptr += chunksize;
2557 sizeremain -= chunksize;
2558 }
2559
2560 if (retlen)
2561 *retlen = size - sizeremain;
2562
2563 return ret;
2564}
2565
/*
 * nmbm_write_logic_page - Write page based on logic address
 * @ni: NMBM instance structure
 * @addr: logic linear address
 * @data: buffer contains main data. optional.
 * @oob: buffer contains oob data. optional.
 * @mode: write mode
 */
2574static int nmbm_write_logic_page(struct nmbm_instance *ni, uint64_t addr,
2575 const void *data, const void *oob,
2576 enum nmbm_oob_mode mode)
2577{
2578 uint32_t lb, pb, offset;
2579 uint64_t paddr;
2580 bool success;
2581
2582 /* Extract block address and in-block offset */
2583 lb = addr2ba(ni, addr);
2584 offset = addr & ni->erasesize_mask;
2585
2586 /* Map logic block to physical block */
2587 pb = ni->block_mapping[lb];
2588
2589 /* Whether the logic block is good (has valid mapping) */
2590 if ((int32_t)pb < 0) {
2591 nlog_debug(ni, "Logic block %u is a bad block\n", lb);
2592 return -EIO;
2593 }
2594
2595 /* Fail if physical block is marked bad */
2596 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2597 return -EIO;
2598
2599 /* Assemble new address */
2600 paddr = ba2addr(ni, pb) + offset;
2601
2602 success = nmbm_write_phys_page(ni, paddr, data, oob, mode);
2603 if (success)
2604 return 0;
2605
2606 /*
2607 * Do not remap bad block here. Just mark this block in state table.
2608 * Remap this block on erasing.
2609 */
2610 nmbm_set_block_state(ni, pb, BLOCK_ST_NEED_REMAP);
2611 nmbm_update_info_table(ni);
2612
2613 return -EIO;
2614}
2615
2616/*
2617 * nmbm_write_single_page - Write one page based on logic address
2618 * @ni: NMBM instance structure
2619 * @addr: logic linear address
2620 * @data: buffer contains main data. optional.
2621 * @oob: buffer contains oob data. optional.
2622 * @mode: write mode
2623 */
2624int nmbm_write_single_page(struct nmbm_instance *ni, uint64_t addr,
2625 const void *data, const void *oob,
2626 enum nmbm_oob_mode mode)
2627{
2628 if (!ni)
2629 return -EINVAL;
2630
2631 /* Sanity check */
2632 if (ni->protected) {
2633 nlog_debug(ni, "Device is forced read-only\n");
2634 return -EROFS;
2635 }
2636
2637 if (addr >= ba2addr(ni, ni->data_block_count)) {
2638 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2639 return -EINVAL;
2640 }
2641
2642 return nmbm_write_logic_page(ni, addr, data, oob, mode);
2643}
2644
2645/*
2646 * nmbm_write_range - Write data without oob
2647 * @ni: NMBM instance structure
2648 * @addr: logic linear address
2649 * @size: data size to write
2650 * @data: buffer contains data to be written
2651 * @mode: write mode
2652 * @retlen: return actual data size written
2653 */
2654int nmbm_write_range(struct nmbm_instance *ni, uint64_t addr, size_t size,
2655 const void *data, enum nmbm_oob_mode mode,
2656 size_t *retlen)
2657{
2658 uint64_t off = addr;
2659 const uint8_t *ptr = data;
2660 size_t sizeremain = size, chunksize, leading;
2661 int ret;
2662
2663 if (!ni)
2664 return -EINVAL;
2665
2666 /* Sanity check */
2667 if (ni->protected) {
2668 nlog_debug(ni, "Device is forced read-only\n");
2669 return -EROFS;
2670 }
2671
2672 if (addr >= ba2addr(ni, ni->data_block_count)) {
2673 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2674 return -EINVAL;
2675 }
2676
2677 if (addr + size > ba2addr(ni, ni->data_block_count)) {
2678 nlog_err(ni, "Write size 0x%zx is too large\n", size);
2679 return -EINVAL;
2680 }
2681
2682 if (!size) {
2683 nlog_warn(ni, "No data to be written\n");
2684 return 0;
2685 }
2686
2687 while (sizeremain) {
2688 WATCHDOG_RESET();
2689
2690 leading = off & ni->writesize_mask;
2691 chunksize = ni->lower.writesize - leading;
2692 if (chunksize > sizeremain)
2693 chunksize = sizeremain;
2694
2695 if (chunksize == ni->lower.writesize) {
2696 ret = nmbm_write_logic_page(ni, off - leading, ptr,
2697 NULL, mode);
2698 if (ret)
2699 break;
2700 } else {
2701 memset(ni->page_cache, 0xff, leading);
2702 memcpy(ni->page_cache + leading, ptr, chunksize);
2703
2704 ret = nmbm_write_logic_page(ni, off - leading,
2705 ni->page_cache, NULL,
2706 mode);
2707 if (ret)
2708 break;
2709 }
2710
2711 off += chunksize;
2712 ptr += chunksize;
2713 sizeremain -= chunksize;
2714 }
2715
2716 if (retlen)
2717 *retlen = size - sizeremain;
2718
2719 return ret;
2720}
2721
2722/*
2723 * nmbm_check_bad_block - Check whether a logic block is usable
2724 * @ni: NMBM instance structure
2725 * @addr: logic linear address
2726 */
2727int nmbm_check_bad_block(struct nmbm_instance *ni, uint64_t addr)
2728{
2729 uint32_t lb, pb;
2730
2731 if (!ni)
2732 return -EINVAL;
2733
2734 if (addr >= ba2addr(ni, ni->data_block_count)) {
2735 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2736 return -EINVAL;
2737 }
2738
2739 lb = addr2ba(ni, addr);
2740
2741 /* Map logic block to physical block */
2742 pb = ni->block_mapping[lb];
2743
2744 if ((int32_t)pb < 0)
2745 return 1;
2746
2747 if (nmbm_get_block_state(ni, pb) == BLOCK_ST_BAD)
2748 return 1;
2749
2750 return 0;
2751}
2752
2753/*
2754 * nmbm_mark_bad_block - Mark a logic block unusable
2755 * @ni: NMBM instance structure
2756 * @addr: logic linear address
2757 */
2758int nmbm_mark_bad_block(struct nmbm_instance *ni, uint64_t addr)
2759{
2760 uint32_t lb, pb;
2761
2762 if (!ni)
2763 return -EINVAL;
2764
2765 if (addr >= ba2addr(ni, ni->data_block_count)) {
2766 nlog_err(ni, "Address 0x%llx is invalid\n", addr);
2767 return -EINVAL;
2768 }
2769
2770 lb = addr2ba(ni, addr);
2771
2772 /* Map logic block to physical block */
2773 pb = ni->block_mapping[lb];
2774
2775 if ((int32_t)pb < 0)
2776 return 0;
2777
2778 ni->block_mapping[lb] = -1;
2779 nmbm_mark_phys_bad_block(ni, pb);
2780 nmbm_set_block_state(ni, pb, BLOCK_ST_BAD);
2781 nmbm_update_info_table(ni);
2782
2783 return 0;
2784}
2785
2786/*
2787 * nmbm_get_avail_size - Get available user data size
2788 * @ni: NMBM instance structure
2789 */
2790uint64_t nmbm_get_avail_size(struct nmbm_instance *ni)
2791{
2792 if (!ni)
2793 return 0;
2794
2795 return (uint64_t)ni->data_block_count << ni->erasesize_shift;
2796}
2797
2798/*
2799 * nmbm_get_lower_device - Get lower device structure
2800 * @ni: NMBM instance structure
2801 * @nld: pointer to hold the data of lower device structure
2802 */
2803int nmbm_get_lower_device(struct nmbm_instance *ni, struct nmbm_lower_device *nld)
2804{
2805 if (!ni)
2806 return -EINVAL;
2807
2808 if (nld)
2809 memcpy(nld, &ni->lower, sizeof(*nld));
2810
2811 return 0;
2812}
2813
2814#include "nmbm-debug.inl"