// SPDX-License-Identifier: GPL-2.0
/*
 * MTD layer for NAND Mapped-block Management (NMBM)
 *
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/partitions.h>
#include <linux/of_platform.h>
#include <linux/kern_levels.h>

#include "nmbm-private.h"
#include "nmbm-debug.h"

#define NMBM_MAX_RATIO_DEFAULT		1
#define NMBM_MAX_BLOCKS_DEFAULT		256

struct nmbm_mtd {
	struct mtd_info upper;		/* logical MTD exposed to users */
	struct mtd_info *lower;		/* physical MTD underneath */

	struct nmbm_instance *ni;	/* NMBM core instance */
	uint8_t *page_cache;		/* bounce buffer: one page + OOB */

	flstate_t state;
	spinlock_t lock;
	wait_queue_head_t wq;

	struct device *dev;
	struct list_head node;
};

struct list_head nmbm_devs;
static DEFINE_MUTEX(nmbm_devs_lock);

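/*
 * Callbacks handed to the NMBM core through struct nmbm_lower_device.
 * They translate the core's page/block requests into mtd_* calls on the
 * lower (physical) MTD device.
 */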
static int nmbm_lower_read_page(void *arg, uint64_t addr, void *buf, void *oob,
				enum nmbm_oob_mode mode)
{
	struct nmbm_mtd *nm = arg;
	struct mtd_oob_ops ops;
	int ret;

	memset(&ops, 0, sizeof(ops));

	switch (mode) {
	case NMBM_MODE_PLACE_OOB:
		ops.mode = MTD_OPS_PLACE_OOB;
		break;
	case NMBM_MODE_AUTO_OOB:
		ops.mode = MTD_OPS_AUTO_OOB;
		break;
	case NMBM_MODE_RAW:
		ops.mode = MTD_OPS_RAW;
		break;
	default:
		pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
		return -ENOTSUPP;
	}

	if (buf) {
		ops.datbuf = buf;
		ops.len = nm->lower->writesize;
	}

	if (oob) {
		ops.oobbuf = oob;
		ops.ooblen = mtd_oobavail(nm->lower, &ops);
	}

	ret = mtd_read_oob(nm->lower, addr, &ops);
	nm->upper.ecc_stats.corrected = nm->lower->ecc_stats.corrected;
	nm->upper.ecc_stats.failed = nm->lower->ecc_stats.failed;

	/* Report error on failure (including ecc error) */
	if (ret < 0 && ret != -EUCLEAN)
		return ret;

	/*
	 * Since mtd_read_oob() won't report the exact number of bitflips,
	 * all we can know is whether the bitflip count exceeded the
	 * threshold. We want -EUCLEAN to be passed to the upper layer,
	 * but not the error value itself. To achieve this, report a
	 * bitflip count just above the threshold.
	 */
	if (ret == -EUCLEAN) {
		return min_t(u32, nm->lower->bitflip_threshold + 1,
			     nm->lower->ecc_strength);
	}

	/* For bitflips less than the threshold, return 0 */
	return 0;
}

static int nmbm_lower_write_page(void *arg, uint64_t addr, const void *buf,
				 const void *oob, enum nmbm_oob_mode mode)
{
	struct nmbm_mtd *nm = arg;
	struct mtd_oob_ops ops;

	memset(&ops, 0, sizeof(ops));

	switch (mode) {
	case NMBM_MODE_PLACE_OOB:
		ops.mode = MTD_OPS_PLACE_OOB;
		break;
	case NMBM_MODE_AUTO_OOB:
		ops.mode = MTD_OPS_AUTO_OOB;
		break;
	case NMBM_MODE_RAW:
		ops.mode = MTD_OPS_RAW;
		break;
	default:
		pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
		return -ENOTSUPP;
	}

	if (buf) {
		ops.datbuf = (uint8_t *)buf;
		ops.len = nm->lower->writesize;
	}

	if (oob) {
		ops.oobbuf = (uint8_t *)oob;
		ops.ooblen = mtd_oobavail(nm->lower, &ops);
	}

	return mtd_write_oob(nm->lower, addr, &ops);
}

static int nmbm_lower_erase_block(void *arg, uint64_t addr)
{
	struct nmbm_mtd *nm = arg;
	struct erase_info ei;

	memset(&ei, 0, sizeof(ei));

	ei.addr = addr;
	ei.len = nm->lower->erasesize;

	return mtd_erase(nm->lower, &ei);
}

static int nmbm_lower_is_bad_block(void *arg, uint64_t addr)
{
	struct nmbm_mtd *nm = arg;

	return mtd_block_isbad(nm->lower, addr);
}

static int nmbm_lower_mark_bad_block(void *arg, uint64_t addr)
{
	struct nmbm_mtd *nm = arg;

	return mtd_block_markbad(nm->lower, addr);
}

static void nmbm_lower_log(void *arg, enum nmbm_log_category level,
			   const char *fmt, va_list ap)
{
	struct nmbm_mtd *nm = arg;
	char *msg;
	char *kl;

	msg = kvasprintf(GFP_KERNEL, fmt, ap);
	if (!msg) {
		dev_warn(nm->dev, "unable to print log\n");
		return;
	}

	switch (level) {
	case NMBM_LOG_DEBUG:
		kl = KERN_DEBUG;
		break;
	case NMBM_LOG_WARN:
		kl = KERN_WARNING;
		break;
	case NMBM_LOG_ERR:
		kl = KERN_ERR;
		break;
	case NMBM_LOG_EMERG:
		kl = KERN_EMERG;
		break;
	default:
		kl = KERN_INFO;
	}

	dev_printk(kl, nm->dev, "%s", msg);

	kfree(msg);
}

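/*
 * Serialize access to the NMBM instance: a caller either grabs the device
 * while it is FL_READY or sleeps on the wait queue until the current owner
 * calls nmbm_release_device(). An FL_PM_SUSPENDED request is also granted
 * when the device is already suspended.
 */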
static int nmbm_get_device(struct nmbm_mtd *nm, int new_state)
{
	DECLARE_WAITQUEUE(wait, current);

retry:
	spin_lock(&nm->lock);

	if (nm->state == FL_READY) {
		nm->state = new_state;
		spin_unlock(&nm->lock);
		return 0;
	}

	if (new_state == FL_PM_SUSPENDED) {
		if (nm->state == FL_PM_SUSPENDED) {
			spin_unlock(&nm->lock);
			return 0;
		}
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&nm->wq, &wait);
	spin_unlock(&nm->lock);
	schedule();
	remove_wait_queue(&nm->wq, &wait);
	goto retry;
}

static void nmbm_release_device(struct nmbm_mtd *nm)
{
	spin_lock(&nm->lock);
	nm->state = FL_READY;
	wake_up(&nm->wq);
	spin_unlock(&nm->lock);
}

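/* mtd_info operations exported by the upper (logical) NMBM device */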
static int nmbm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	int ret;

	nmbm_get_device(nm, FL_ERASING);

	ret = nmbm_erase_block_range(nm->ni, instr->addr, instr->len,
				     &instr->fail_addr);

	nmbm_release_device(nm);

	if (!ret)
		return 0;

	return -EIO;
}

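/*
 * Read an arbitrary data/OOB range by bouncing whole pages through
 * nm->page_cache, copying out only the requested portion of each page.
 */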
static int nmbm_mtd_read_data(struct nmbm_mtd *nm, uint64_t addr,
			      struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
{
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool has_ecc_err = false;
	int ret, max_bitflips = 0;

	col = addr & nm->lower->writesize_mask;
	addr &= ~nm->lower->writesize_mask;
	maxooblen = mtd_oobavail(nm->lower, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? nm->page_cache : NULL;
	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		ret = nmbm_read_single_page(nm->ni, addr, datcache, oobcache,
					    mode);
		if (ret < 0 && ret != -EBADMSG)
			return ret;

		/* Continue reading on ecc error */
		if (ret == -EBADMSG)
			has_ecc_err = true;

		/* Record the maximum bitflips between pages */
		if (ret > max_bitflips)
			max_bitflips = ret;

		if (len) {
			/* Move data */
			chklen = nm->lower->writesize - col;
			if (chklen > len)
				chklen = len;

			memcpy(ops->datbuf + ops->retlen, datcache + col,
			       chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
			       chklen);
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		addr += nm->lower->writesize;
	}

	if (has_ecc_err)
		return -EBADMSG;

	return max_bitflips;
}

static int nmbm_mtd_read_oob(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	uint32_t maxooblen;
	enum nmbm_oob_mode mode;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
		mode = NMBM_MODE_PLACE_OOB;
		break;
	case MTD_OPS_AUTO_OOB:
		mode = NMBM_MODE_AUTO_OOB;
		break;
	case MTD_OPS_RAW:
		mode = NMBM_MODE_RAW;
		break;
	default:
		pr_debug("%s: unsupported oob mode: %u\n", __func__, ops->mode);
		return -ENOTSUPP;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow read past end of device */
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		pr_debug("%s: attempt to read beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	if (!ops->oobbuf) {
		nmbm_get_device(nm, FL_READING);

		/* Optimized for reading data only */
		ret = nmbm_read_range(nm->ni, from, ops->len, ops->datbuf,
				      mode, &ops->retlen);

		nmbm_release_device(nm);

		return ret;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		pr_debug("%s: attempt to start read outside oob\n",
			 __func__);
		return -EINVAL;
	}

	if (unlikely(from >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (from >> mtd->writesize_shift)) * maxooblen)) {
		pr_debug("%s: attempt to read beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	nmbm_get_device(nm, FL_READING);
	ret = nmbm_mtd_read_data(nm, from, ops, mode);
	nmbm_release_device(nm);

	return ret;
}

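/*
 * Write an arbitrary data/OOB range page by page: unwritten parts of the
 * bounce buffers are padded with 0xff so untouched bytes remain erased.
 */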
static int nmbm_mtd_write_data(struct nmbm_mtd *nm, uint64_t addr,
			       struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
{
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	int ret;

	col = addr & nm->lower->writesize_mask;
	addr &= ~nm->lower->writesize_mask;
	maxooblen = mtd_oobavail(nm->lower, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? nm->page_cache : NULL;
	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (len) {
			/* Move data */
			chklen = nm->lower->writesize - col;
			if (chklen > len)
				chklen = len;

			memset(datcache, 0xff, col);
			memcpy(datcache + col, ops->datbuf + ops->retlen,
			       chklen);
			memset(datcache + col + chklen, 0xff,
			       nm->lower->writesize - col - chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memset(oobcache, 0xff, ooboffs);
			memcpy(oobcache + ooboffs,
			       ops->oobbuf + ops->oobretlen, chklen);
			memset(oobcache + ooboffs + chklen, 0xff,
			       nm->lower->oobsize - ooboffs - chklen);
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		ret = nmbm_write_single_page(nm->ni, addr, datcache, oobcache,
					     mode);
		if (ret)
			return ret;

		addr += nm->lower->writesize;
	}

	return 0;
}

static int nmbm_mtd_write_oob(struct mtd_info *mtd, loff_t to,
			      struct mtd_oob_ops *ops)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	enum nmbm_oob_mode mode;
	uint32_t maxooblen;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
		mode = NMBM_MODE_PLACE_OOB;
		break;
	case MTD_OPS_AUTO_OOB:
		mode = NMBM_MODE_AUTO_OOB;
		break;
	case MTD_OPS_RAW:
		mode = NMBM_MODE_RAW;
		break;
	default:
		pr_debug("%s: unsupported oob mode: %u\n", __func__,
			 ops->mode);
		return -ENOTSUPP;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		pr_debug("%s: attempt to write beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	if (!ops->oobbuf) {
		nmbm_get_device(nm, FL_WRITING);

		/* Optimized for writing data only */
		ret = nmbm_write_range(nm->ni, to, ops->len, ops->datbuf,
				       mode, &ops->retlen);

		nmbm_release_device(nm);

		return ret;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		pr_debug("%s: attempt to start write outside oob\n",
			 __func__);
		return -EINVAL;
	}

	if (unlikely(to >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (to >> mtd->writesize_shift)) * maxooblen)) {
		pr_debug("%s: attempt to write beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	nmbm_get_device(nm, FL_WRITING);
	ret = nmbm_mtd_write_data(nm, to, ops, mode);
	nmbm_release_device(nm);

	return ret;
}

static int nmbm_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	int ret;

	nmbm_get_device(nm, FL_READING);
	ret = nmbm_check_bad_block(nm->ni, offs);
	nmbm_release_device(nm);

	return ret;
}

static int nmbm_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	int ret;

	nmbm_get_device(nm, FL_WRITING);
	ret = nmbm_mark_bad_block(nm->ni, offs);
	nmbm_release_device(nm);

	return ret;
}

static void nmbm_mtd_shutdown(struct mtd_info *mtd)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);

	nmbm_get_device(nm, FL_PM_SUSPENDED);
}

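/*
 * Probe flow: locate the lower MTD device via the "lower-mtd-device"
 * phandle or the "lower-mtd-name" property, read the optional tunables,
 * attach an NMBM instance on top of it, and register the resulting
 * logical device as a new MTD.
 */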
static int nmbm_probe(struct platform_device *pdev)
{
	struct device_node *mtd_np, *np = pdev->dev.of_node;
	uint32_t max_ratio, max_reserved_blocks, alloc_size;
	bool forced_create, empty_page_ecc_ok;
	struct nmbm_lower_device nld;
	struct mtd_info *lower, *mtd;
	struct nmbm_mtd *nm;
	const char *mtdname;
	int ret;

	mtd_np = of_parse_phandle(np, "lower-mtd-device", 0);
	if (mtd_np) {
		lower = get_mtd_device_by_node(mtd_np);
		if (!IS_ERR(lower))
			goto do_attach_mtd;

		dev_dbg(&pdev->dev, "failed to find mtd device by phandle\n");
		return -EPROBE_DEFER;
	}

	ret = of_property_read_string(np, "lower-mtd-name", &mtdname);
	if (!ret) {
		lower = get_mtd_device_nm(mtdname);
		if (!IS_ERR(lower))
			goto do_attach_mtd;

		dev_dbg(&pdev->dev, "failed to find mtd device by name '%s'\n",
			mtdname);
		return -EPROBE_DEFER;
	}

	/* Neither a lower MTD phandle nor a name was given */
	dev_err(&pdev->dev, "lower mtd device not specified\n");
	return -ENODEV;

do_attach_mtd:
	if (of_property_read_u32(np, "max-ratio", &max_ratio))
		max_ratio = NMBM_MAX_RATIO_DEFAULT;

	if (of_property_read_u32(np, "max-reserved-blocks",
				 &max_reserved_blocks))
		max_reserved_blocks = NMBM_MAX_BLOCKS_DEFAULT;

	forced_create = of_property_read_bool(np, "forced-create");
	empty_page_ecc_ok = of_property_read_bool(np,
						  "empty-page-ecc-protected");

	memset(&nld, 0, sizeof(nld));

	nld.flags = 0;

	if (forced_create)
		nld.flags |= NMBM_F_CREATE;

	if (empty_page_ecc_ok)
		nld.flags |= NMBM_F_EMPTY_PAGE_ECC_OK;

	nld.max_ratio = max_ratio;
	nld.max_reserved_blocks = max_reserved_blocks;

	nld.size = lower->size;
	nld.erasesize = lower->erasesize;
	nld.writesize = lower->writesize;
	nld.oobsize = lower->oobsize;
	nld.oobavail = lower->oobavail;

	nld.read_page = nmbm_lower_read_page;
	nld.write_page = nmbm_lower_write_page;
	nld.erase_block = nmbm_lower_erase_block;
	nld.is_bad_block = nmbm_lower_is_bad_block;
	nld.mark_bad_block = nmbm_lower_mark_bad_block;

	nld.logprint = nmbm_lower_log;

	alloc_size = nmbm_calc_structure_size(&nld);

	nm = devm_kzalloc(&pdev->dev, sizeof(*nm) + alloc_size +
			  lower->writesize + lower->oobsize, GFP_KERNEL);
	if (!nm) {
		ret = -ENOMEM;
		goto out;
	}

	nm->ni = (void *)nm + sizeof(*nm);
	nm->page_cache = (uint8_t *)nm->ni + alloc_size;
	nm->lower = lower;
	nm->dev = &pdev->dev;

	INIT_LIST_HEAD(&nm->node);
	spin_lock_init(&nm->lock);
	init_waitqueue_head(&nm->wq);

	nld.arg = nm;

	ret = nmbm_attach(&nld, nm->ni);
	if (ret)
		goto out;

	/* Initialize upper mtd */
	mtd = &nm->upper;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;
	mtd->type = lower->type;
	mtd->flags = lower->flags;

	mtd->size = (uint64_t)nm->ni->data_block_count * lower->erasesize;
	mtd->erasesize = lower->erasesize;
	mtd->writesize = lower->writesize;
	mtd->writebufsize = lower->writesize;
	mtd->oobsize = lower->oobsize;
	mtd->oobavail = lower->oobavail;

	mtd->erasesize_shift = lower->erasesize_shift;
	mtd->writesize_shift = lower->writesize_shift;
	mtd->erasesize_mask = lower->erasesize_mask;
	mtd->writesize_mask = lower->writesize_mask;

	mtd->bitflip_threshold = lower->bitflip_threshold;

	mtd->ooblayout = lower->ooblayout;

	mtd->ecc_step_size = lower->ecc_step_size;
	mtd->ecc_strength = lower->ecc_strength;

	mtd->numeraseregions = lower->numeraseregions;
	mtd->eraseregions = lower->eraseregions;

	mtd->_erase = nmbm_mtd_erase;
	mtd->_read_oob = nmbm_mtd_read_oob;
	mtd->_write_oob = nmbm_mtd_write_oob;
	mtd->_block_isbad = nmbm_mtd_block_isbad;
	mtd->_block_markbad = nmbm_mtd_block_markbad;
	mtd->_reboot = nmbm_mtd_shutdown;

	mtd_set_of_node(mtd, np);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(&pdev->dev, "failed to register mtd device\n");
		nmbm_detach(nm->ni);
		goto out;
	}

	platform_set_drvdata(pdev, nm);

	mutex_lock(&nmbm_devs_lock);
	list_add_tail(&nm->node, &nmbm_devs);
	mutex_unlock(&nmbm_devs_lock);

	return 0;

out:
	if (nm)
		devm_kfree(&pdev->dev, nm);

	put_mtd_device(lower);

	return ret;
}

static int nmbm_remove(struct platform_device *pdev)
{
	struct nmbm_mtd *nm = platform_get_drvdata(pdev);
	struct mtd_info *lower = nm->lower;
	int ret;

	ret = mtd_device_unregister(&nm->upper);
	if (ret)
		return ret;

	nmbm_detach(nm->ni);

	/* Drop the device from the global list added at probe time */
	mutex_lock(&nmbm_devs_lock);
	list_del(&nm->node);
	mutex_unlock(&nmbm_devs_lock);

	devm_kfree(&pdev->dev, nm);

	put_mtd_device(lower);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id nmbm_ids[] = {
	{ .compatible = "generic,nmbm" },
	{ },
};

MODULE_DEVICE_TABLE(of, nmbm_ids);

static struct platform_driver nmbm_driver = {
	.probe = nmbm_probe,
	.remove = nmbm_remove,
	.driver = {
		.name = "nmbm",
		.of_match_table = nmbm_ids,
	},
};

static int __init nmbm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&nmbm_devs);

	ret = platform_driver_register(&nmbm_driver);
	if (ret) {
		pr_err("failed to register nmbm driver\n");
		return ret;
	}

	return 0;
}
module_init(nmbm_init);

static void __exit nmbm_exit(void)
{
	platform_driver_unregister(&nmbm_driver);
}
module_exit(nmbm_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
MODULE_DESCRIPTION("NAND mapping block management");