// SPDX-License-Identifier: GPL-2.0
/*
 * MTD layer for NAND Mapped-block Management (NMBM)
 *
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/partitions.h>
#include <linux/of_platform.h>
#include <linux/kern_levels.h>

#include "nmbm-private.h"
#include "nmbm-debug.h"

#define NMBM_MAX_RATIO_DEFAULT		1
#define NMBM_MAX_BLOCKS_DEFAULT		256

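/*
 * struct nmbm_mtd - NMBM wrapper around a lower (raw NAND) MTD device
 * @upper: the mapped MTD device exposed to the rest of the system
 * @lower: the underlying NAND MTD device being managed
 * @ni: NMBM core instance state for the lower device
 * @page_cache: bounce buffer holding one page of data plus its OOB area
 * @state: current flash access state, used for exclusive access
 * @lock: protects @state
 * @wq: waitqueue for threads waiting for exclusive access
 * @dev: the backing platform device
 * @node: entry in the global nmbm_devs list
 */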
struct nmbm_mtd {
	struct mtd_info upper;
	struct mtd_info *lower;

	struct nmbm_instance *ni;
	uint8_t *page_cache;

	flstate_t state;
	spinlock_t lock;
	wait_queue_head_t wq;

	struct device *dev;
	struct list_head node;
};

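/* All NMBM devices registered by this driver, protected by nmbm_devs_lock */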
struct list_head nmbm_devs;
static DEFINE_MUTEX(nmbm_devs_lock);

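/*
 * Read one page (and/or its OOB data) from the lower MTD device on behalf
 * of the NMBM core. Returns 0 on success, a positive value for an
 * uncorrectable ECC error (-EBADMSG from the lower driver), or a negative
 * error code for other failures. Correctable bitflips (-EUCLEAN) are
 * treated as success.
 */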
static int nmbm_lower_read_page(void *arg, uint64_t addr, void *buf, void *oob,
				enum nmbm_oob_mode mode)
{
	struct nmbm_mtd *nm = arg;
	struct mtd_oob_ops ops;
	int ret;

	memset(&ops, 0, sizeof(ops));

	switch (mode) {
	case NMBM_MODE_PLACE_OOB:
		ops.mode = MTD_OPS_PLACE_OOB;
		break;
	case NMBM_MODE_AUTO_OOB:
		ops.mode = MTD_OPS_AUTO_OOB;
		break;
	case NMBM_MODE_RAW:
		ops.mode = MTD_OPS_RAW;
		break;
	default:
		pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
		return -ENOTSUPP;
	}

	if (buf) {
		ops.datbuf = buf;
		ops.len = nm->lower->writesize;
	}

	if (oob) {
		ops.oobbuf = oob;
		ops.ooblen = mtd_oobavail(nm->lower, &ops);
	}

	ret = mtd_read_oob(nm->lower, addr, &ops);
	nm->upper.ecc_stats.corrected = nm->lower->ecc_stats.corrected;
	nm->upper.ecc_stats.failed = nm->lower->ecc_stats.failed;

	if (ret == -EBADMSG)
		return 1;

	if (ret && ret != -EUCLEAN)
		return ret;

	return 0;
}

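/*
 * Write one page (and/or its OOB data) to the lower MTD device on behalf of
 * the NMBM core. The NMBM OOB mode is translated to the matching MTD_OPS_*
 * mode.
 */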
static int nmbm_lower_write_page(void *arg, uint64_t addr, const void *buf,
				 const void *oob, enum nmbm_oob_mode mode)
{
	struct nmbm_mtd *nm = arg;
	struct mtd_oob_ops ops;

	memset(&ops, 0, sizeof(ops));

	switch (mode) {
	case NMBM_MODE_PLACE_OOB:
		ops.mode = MTD_OPS_PLACE_OOB;
		break;
	case NMBM_MODE_AUTO_OOB:
		ops.mode = MTD_OPS_AUTO_OOB;
		break;
	case NMBM_MODE_RAW:
		ops.mode = MTD_OPS_RAW;
		break;
	default:
		pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
		return -ENOTSUPP;
	}

	if (buf) {
		ops.datbuf = (uint8_t *)buf;
		ops.len = nm->lower->writesize;
	}

	if (oob) {
		ops.oobbuf = (uint8_t *)oob;
		ops.ooblen = mtd_oobavail(nm->lower, &ops);
	}

	return mtd_write_oob(nm->lower, addr, &ops);
}

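/* Erase a single block of the lower MTD device for the NMBM core */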
static int nmbm_lower_erase_block(void *arg, uint64_t addr)
{
	struct nmbm_mtd *nm = arg;
	struct erase_info ei;

	memset(&ei, 0, sizeof(ei));

	ei.addr = addr;
	ei.len = nm->lower->erasesize;

	return mtd_erase(nm->lower, &ei);
}

static int nmbm_lower_is_bad_block(void *arg, uint64_t addr)
{
	struct nmbm_mtd *nm = arg;

	return mtd_block_isbad(nm->lower, addr);
}

static int nmbm_lower_mark_bad_block(void *arg, uint64_t addr)
{
	struct nmbm_mtd *nm = arg;

	return mtd_block_markbad(nm->lower, addr);
}

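/*
 * Forward log messages from the NMBM core to the kernel log, mapping NMBM
 * log levels to the corresponding kernel log levels.
 */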
static void nmbm_lower_log(void *arg, enum nmbm_log_category level,
			   const char *fmt, va_list ap)
{
	struct nmbm_mtd *nm = arg;
	char *msg;
	char *kl;

	msg = kvasprintf(GFP_KERNEL, fmt, ap);
	if (!msg) {
		dev_warn(nm->dev, "unable to print log\n");
		return;
	}

	switch (level) {
	case NMBM_LOG_DEBUG:
		kl = KERN_DEBUG;
		break;
	case NMBM_LOG_WARN:
		kl = KERN_WARNING;
		break;
	case NMBM_LOG_ERR:
		kl = KERN_ERR;
		break;
	case NMBM_LOG_EMERG:
		kl = KERN_EMERG;
		break;
	default:
		kl = KERN_INFO;
	}

	dev_printk(kl, nm->dev, "%s", msg);

	kfree(msg);
}

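/*
 * Gain exclusive access to the device, following the classic MTD/NAND
 * get/release pattern: if the device is busy, sleep on the waitqueue until
 * the current owner calls nmbm_release_device(). FL_PM_SUSPENDED requests
 * succeed immediately if the device is already suspended.
 */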
static int nmbm_get_device(struct nmbm_mtd *nm, int new_state)
{
	DECLARE_WAITQUEUE(wait, current);

retry:
	spin_lock(&nm->lock);

	if (nm->state == FL_READY) {
		nm->state = new_state;
		spin_unlock(&nm->lock);
		return 0;
	}

	if (new_state == FL_PM_SUSPENDED) {
		if (nm->state == FL_PM_SUSPENDED) {
			spin_unlock(&nm->lock);
			return 0;
		}
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&nm->wq, &wait);
	spin_unlock(&nm->lock);
	schedule();
	remove_wait_queue(&nm->wq, &wait);
	goto retry;
}

static void nmbm_release_device(struct nmbm_mtd *nm)
{
	spin_lock(&nm->lock);
	nm->state = FL_READY;
	wake_up(&nm->wq);
	spin_unlock(&nm->lock);
}

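/*
 * mtd->_erase handler: erase a range of logical blocks through the NMBM
 * core, which handles the logical-to-physical block remapping.
 */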
static int nmbm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	int ret;

	nmbm_get_device(nm, FL_ERASING);

	ret = nmbm_erase_block_range(nm->ni, instr->addr, instr->len,
				     &instr->fail_addr);

	nmbm_release_device(nm);

	if (!ret)
		return 0;

	return -EIO;
}

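/*
 * Page-by-page read helper for mixed data+OOB requests. Each page is read
 * into the page_cache bounce buffer and the requested portions of data and
 * OOB are copied out to the caller's buffers.
 */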
static int nmbm_mtd_read_data(struct nmbm_mtd *nm, uint64_t addr,
			      struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
{
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	int ret;

	col = addr & nm->lower->writesize_mask;
	addr &= ~nm->lower->writesize_mask;
	maxooblen = mtd_oobavail(nm->lower, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? nm->page_cache : NULL;
	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		ret = nmbm_read_single_page(nm->ni, addr, datcache, oobcache,
					    mode);
		if (ret) {
			if (ret > 0)
				return -EBADMSG;
			return -EIO;
		}

		if (len) {
			/* Move data */
			chklen = nm->lower->writesize - col;
			if (chklen > len)
				chklen = len;

			memcpy(ops->datbuf + ops->retlen, datcache + col,
			       chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
			       chklen);
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		addr += nm->lower->writesize;
	}

	return 0;
}

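/*
 * mtd->_read_oob handler. Pure data reads take the optimized
 * nmbm_read_range() path; requests that include OOB data go through the
 * page-by-page nmbm_mtd_read_data() helper above.
 */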
static int nmbm_mtd_read_oob(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	uint32_t maxooblen;
	enum nmbm_oob_mode mode;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
		mode = NMBM_MODE_PLACE_OOB;
		break;
	case MTD_OPS_AUTO_OOB:
		mode = NMBM_MODE_AUTO_OOB;
		break;
	case MTD_OPS_RAW:
		mode = NMBM_MODE_RAW;
		break;
	default:
		pr_debug("%s: unsupported oob mode: %u\n", __func__, ops->mode);
		return -ENOTSUPP;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow read past end of device */
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		pr_debug("%s: attempt to read beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	if (!ops->oobbuf) {
		nmbm_get_device(nm, FL_READING);

		/* Optimized for reading data only */
		ret = nmbm_read_range(nm->ni, from, ops->len, ops->datbuf,
				      mode, &ops->retlen);

		nmbm_release_device(nm);

		if (ret > 0)
			return -EBADMSG;
		else if (ret)
			return -EIO;

		return 0;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		pr_debug("%s: attempt to start read outside oob\n",
			 __func__);
		return -EINVAL;
	}

	if (unlikely(from >= mtd->size ||
		     ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
		     (from >> mtd->writesize_shift)) * maxooblen)) {
		pr_debug("%s: attempt to read beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	nmbm_get_device(nm, FL_READING);
	ret = nmbm_mtd_read_data(nm, from, ops, mode);
	nmbm_release_device(nm);

	return ret;
}

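/*
 * Page-by-page write helper for mixed data+OOB requests. The parts of each
 * page not covered by the caller's buffers are padded with 0xff in the
 * bounce buffer so the unwritten bytes stay in the erased state.
 */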
static int nmbm_mtd_write_data(struct nmbm_mtd *nm, uint64_t addr,
			       struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
{
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	int ret;

	col = addr & nm->lower->writesize_mask;
	addr &= ~nm->lower->writesize_mask;
	maxooblen = mtd_oobavail(nm->lower, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? nm->page_cache : NULL;
	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (len) {
			/* Move data */
			chklen = nm->lower->writesize - col;
			if (chklen > len)
				chklen = len;

			memset(datcache, 0xff, col);
			memcpy(datcache + col, ops->datbuf + ops->retlen,
			       chklen);
			memset(datcache + col + chklen, 0xff,
			       nm->lower->writesize - col - chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memset(oobcache, 0xff, ooboffs);
			memcpy(oobcache + ooboffs,
			       ops->oobbuf + ops->oobretlen, chklen);
			memset(oobcache + ooboffs + chklen, 0xff,
			       nm->lower->oobsize - ooboffs - chklen);
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		ret = nmbm_write_single_page(nm->ni, addr, datcache, oobcache,
					     mode);
		if (ret)
			return ret;

		addr += nm->lower->writesize;
	}

	return 0;
}

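/*
 * mtd->_write_oob handler. Pure data writes take the optimized
 * nmbm_write_range() path; requests that include OOB data go through the
 * page-by-page nmbm_mtd_write_data() helper above.
 */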
static int nmbm_mtd_write_oob(struct mtd_info *mtd, loff_t to,
			      struct mtd_oob_ops *ops)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	enum nmbm_oob_mode mode;
	uint32_t maxooblen;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
		mode = NMBM_MODE_PLACE_OOB;
		break;
	case MTD_OPS_AUTO_OOB:
		mode = NMBM_MODE_AUTO_OOB;
		break;
	case MTD_OPS_RAW:
		mode = NMBM_MODE_RAW;
		break;
	default:
		pr_debug("%s: unsupported oob mode: %u\n", __func__,
			 ops->mode);
		return -ENOTSUPP;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		pr_debug("%s: attempt to write beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	if (!ops->oobbuf) {
		nmbm_get_device(nm, FL_WRITING);

		/* Optimized for writing data only */
		ret = nmbm_write_range(nm->ni, to, ops->len, ops->datbuf,
				       mode, &ops->retlen);

		nmbm_release_device(nm);

		return ret;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		pr_debug("%s: attempt to start write outside oob\n",
			 __func__);
		return -EINVAL;
	}

	if (unlikely(to >= mtd->size ||
		     ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
		     (to >> mtd->writesize_shift)) * maxooblen)) {
		pr_debug("%s: attempt to write beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	nmbm_get_device(nm, FL_WRITING);
	ret = nmbm_mtd_write_data(nm, to, ops, mode);
	nmbm_release_device(nm);

	return ret;
}

static int nmbm_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	int ret;

	nmbm_get_device(nm, FL_READING);
	ret = nmbm_check_bad_block(nm->ni, offs);
	nmbm_release_device(nm);

	return ret;
}

static int nmbm_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	int ret;

	nmbm_get_device(nm, FL_WRITING);
	ret = nmbm_mark_bad_block(nm->ni, offs);
	nmbm_release_device(nm);

	return ret;
}

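/*
 * mtd->_reboot handler: grab exclusive access and never release it, so no
 * new flash operation can start while the system is going down.
 */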
static void nmbm_mtd_shutdown(struct mtd_info *mtd)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);

	nmbm_get_device(nm, FL_PM_SUSPENDED);
}

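/*
 * Find the lower MTD device referenced by the device tree (either by
 * phandle or by name), attach an NMBM instance on top of it and register
 * the resulting mapped device as a new MTD device.
 */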
static int nmbm_probe(struct platform_device *pdev)
{
	struct device_node *mtd_np, *np = pdev->dev.of_node;
	uint32_t max_ratio, max_reserved_blocks, alloc_size;
	struct nmbm_lower_device nld;
	struct mtd_info *lower, *mtd;
	struct nmbm_mtd *nm;
	const char *mtdname;
	bool forced_create;
	int ret;

	mtd_np = of_parse_phandle(np, "lower-mtd-device", 0);
	if (mtd_np) {
		lower = get_mtd_device_by_node(mtd_np);
		if (!IS_ERR(lower))
			goto do_attach_mtd;

		dev_dbg(&pdev->dev, "failed to find mtd device by phandle\n");
		return -EPROBE_DEFER;
	}

	ret = of_property_read_string(np, "lower-mtd-name", &mtdname);
	if (!ret) {
		lower = get_mtd_device_nm(mtdname);
		if (!IS_ERR(lower))
			goto do_attach_mtd;

		dev_dbg(&pdev->dev, "failed to find mtd device by name '%s'\n",
			mtdname);
		return -EPROBE_DEFER;
	}

	/* Neither a phandle nor a name was given for the lower device */
	dev_err(&pdev->dev, "no lower mtd device specified\n");
	return -ENODEV;

do_attach_mtd:
	if (of_property_read_u32(np, "max-ratio", &max_ratio))
		max_ratio = NMBM_MAX_RATIO_DEFAULT;

	if (of_property_read_u32(np, "max-reserved-blocks",
				 &max_reserved_blocks))
		max_reserved_blocks = NMBM_MAX_BLOCKS_DEFAULT;

	forced_create = of_property_read_bool(np, "forced-create");

	memset(&nld, 0, sizeof(nld));

	nld.flags = forced_create ? NMBM_F_CREATE : 0;
	nld.max_ratio = max_ratio;
	nld.max_reserved_blocks = max_reserved_blocks;

	nld.size = lower->size;
	nld.erasesize = lower->erasesize;
	nld.writesize = lower->writesize;
	nld.oobsize = lower->oobsize;
	nld.oobavail = lower->oobavail;

	nld.read_page = nmbm_lower_read_page;
	nld.write_page = nmbm_lower_write_page;
	nld.erase_block = nmbm_lower_erase_block;
	nld.is_bad_block = nmbm_lower_is_bad_block;
	nld.mark_bad_block = nmbm_lower_mark_bad_block;

	nld.logprint = nmbm_lower_log;

	alloc_size = nmbm_calc_structure_size(&nld);

	nm = devm_kzalloc(&pdev->dev, sizeof(*nm) + alloc_size +
			  lower->writesize + lower->oobsize, GFP_KERNEL);
	if (!nm) {
		ret = -ENOMEM;
		goto out;
	}

	nm->ni = (void *)nm + sizeof(*nm);
	nm->page_cache = (uint8_t *)nm->ni + alloc_size;
	nm->lower = lower;
	nm->dev = &pdev->dev;

	INIT_LIST_HEAD(&nm->node);
	spin_lock_init(&nm->lock);
	init_waitqueue_head(&nm->wq);

	nld.arg = nm;

	ret = nmbm_attach(&nld, nm->ni);
	if (ret)
		goto out;

	/* Initialize upper mtd */
	mtd = &nm->upper;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;
	mtd->type = lower->type;
	mtd->flags = lower->flags;

	mtd->size = (uint64_t)nm->ni->data_block_count * lower->erasesize;
	mtd->erasesize = lower->erasesize;
	mtd->writesize = lower->writesize;
	mtd->writebufsize = lower->writesize;
	mtd->oobsize = lower->oobsize;
	mtd->oobavail = lower->oobavail;

	mtd->erasesize_shift = lower->erasesize_shift;
	mtd->writesize_shift = lower->writesize_shift;
	mtd->erasesize_mask = lower->erasesize_mask;
	mtd->writesize_mask = lower->writesize_mask;

	mtd->bitflip_threshold = lower->bitflip_threshold;

	mtd->ooblayout = lower->ooblayout;

	mtd->ecc_step_size = lower->ecc_step_size;
	mtd->ecc_strength = lower->ecc_strength;

	mtd->numeraseregions = lower->numeraseregions;
	mtd->eraseregions = lower->eraseregions;

	mtd->_erase = nmbm_mtd_erase;
	mtd->_read_oob = nmbm_mtd_read_oob;
	mtd->_write_oob = nmbm_mtd_write_oob;
	mtd->_block_isbad = nmbm_mtd_block_isbad;
	mtd->_block_markbad = nmbm_mtd_block_markbad;
	mtd->_reboot = nmbm_mtd_shutdown;

	mtd_set_of_node(mtd, np);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(&pdev->dev, "failed to register mtd device\n");
		nmbm_detach(nm->ni);
		goto out;
	}

	platform_set_drvdata(pdev, nm);

	mutex_lock(&nmbm_devs_lock);
	list_add_tail(&nm->node, &nmbm_devs);
	mutex_unlock(&nmbm_devs_lock);

	return 0;

out:
	if (nm)
		devm_kfree(&pdev->dev, nm);

	put_mtd_device(lower);

	return ret;
}

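/*
 * Unregister the upper MTD device, detach the NMBM instance and drop the
 * reference to the lower MTD device.
 */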
static int nmbm_remove(struct platform_device *pdev)
{
	struct nmbm_mtd *nm = platform_get_drvdata(pdev);
	struct mtd_info *lower = nm->lower;
	int ret;

	ret = mtd_device_unregister(&nm->upper);
	if (ret)
		return ret;

	nmbm_detach(nm->ni);

	mutex_lock(&nmbm_devs_lock);
	list_del(&nm->node);
	mutex_unlock(&nmbm_devs_lock);

	devm_kfree(&pdev->dev, nm);

	put_mtd_device(lower);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id nmbm_ids[] = {
	{ .compatible = "generic,nmbm" },
	{ },
};

MODULE_DEVICE_TABLE(of, nmbm_ids);

static struct platform_driver nmbm_driver = {
	.probe = nmbm_probe,
	.remove = nmbm_remove,
	.driver = {
		.name = "nmbm",
		.of_match_table = nmbm_ids,
	},
};

static int __init nmbm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&nmbm_devs);

	ret = platform_driver_register(&nmbm_driver);
	if (ret) {
		pr_err("failed to register nmbm driver\n");
		return ret;
	}

	return 0;
}
module_init(nmbm_init);

static void __exit nmbm_exit(void)
{
	platform_driver_unregister(&nmbm_driver);
}
module_exit(nmbm_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
MODULE_DESCRIPTION("MTD layer for NAND Mapped-block Management (NMBM)");