// SPDX-License-Identifier: GPL-2.0
/*
 * MTD layer for NAND Mapped-block Management (NMBM)
 *
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/partitions.h>
#include <linux/of_platform.h>
#include <linux/kern_levels.h>

#include "nmbm-private.h"
#include "nmbm-debug.h"

#define NMBM_MAX_RATIO_DEFAULT		1
#define NMBM_MAX_BLOCKS_DEFAULT		256

struct nmbm_mtd {
	struct mtd_info upper;
	struct mtd_info *lower;

	struct nmbm_instance *ni;
	uint8_t *page_cache;

	flstate_t state;
	spinlock_t lock;
	wait_queue_head_t wq;

	struct device *dev;
	struct list_head node;
};

struct list_head nmbm_devs;
static DEFINE_MUTEX(nmbm_devs_lock);

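/*
 * Callbacks handed to the NMBM core for driving the lower (raw) MTD device:
 * page read/write, panic write, block erase, bad-block checking/marking and
 * log output.
 */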
static int nmbm_lower_read_page(void *arg, uint64_t addr, void *buf, void *oob,
				enum nmbm_oob_mode mode)
{
	struct nmbm_mtd *nm = arg;
	struct mtd_oob_ops ops;
	int ret;

	memset(&ops, 0, sizeof(ops));

	switch (mode) {
	case NMBM_MODE_PLACE_OOB:
		ops.mode = MTD_OPS_PLACE_OOB;
		break;
	case NMBM_MODE_AUTO_OOB:
		ops.mode = MTD_OPS_AUTO_OOB;
		break;
	case NMBM_MODE_RAW:
		ops.mode = MTD_OPS_RAW;
		break;
	default:
		pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
		return -ENOTSUPP;
	}

	if (buf) {
		ops.datbuf = buf;
		ops.len = nm->lower->writesize;
	}

	if (oob) {
		ops.oobbuf = oob;
		ops.ooblen = mtd_oobavail(nm->lower, &ops);
	}

	ret = mtd_read_oob(nm->lower, addr, &ops);
	nm->upper.ecc_stats.corrected = nm->lower->ecc_stats.corrected;
	nm->upper.ecc_stats.failed = nm->lower->ecc_stats.failed;

	/* Report real errors (including uncorrectable ECC errors), but not -EUCLEAN */
	if (ret < 0 && ret != -EUCLEAN)
		return ret;

	/*
	 * mtd_read_oob() does not report the exact number of bitflips; all we
	 * can know is whether the number of bitflips exceeded the threshold.
	 * We want -EUCLEAN to be propagated to the upper layer, but not the
	 * error value itself. To achieve this, report a bitflip count above
	 * the threshold (capped at the ECC strength).
	 */
	if (ret == -EUCLEAN) {
		return min_t(u32, nm->lower->bitflip_threshold + 1,
			     nm->lower->ecc_strength);
	}

	/* For bitflips below the threshold, return 0 */
	return 0;
}

static int nmbm_lower_write_page(void *arg, uint64_t addr, const void *buf,
				 const void *oob, enum nmbm_oob_mode mode)
{
	struct nmbm_mtd *nm = arg;
	struct mtd_oob_ops ops;

	memset(&ops, 0, sizeof(ops));

	switch (mode) {
	case NMBM_MODE_PLACE_OOB:
		ops.mode = MTD_OPS_PLACE_OOB;
		break;
	case NMBM_MODE_AUTO_OOB:
		ops.mode = MTD_OPS_AUTO_OOB;
		break;
	case NMBM_MODE_RAW:
		ops.mode = MTD_OPS_RAW;
		break;
	default:
		pr_debug("%s: unsupported NMBM mode: %u\n", __func__, mode);
		return -ENOTSUPP;
	}

	if (buf) {
		ops.datbuf = (uint8_t *)buf;
		ops.len = nm->lower->writesize;
	}

	if (oob) {
		ops.oobbuf = (uint8_t *)oob;
		ops.ooblen = mtd_oobavail(nm->lower, &ops);
	}

	return mtd_write_oob(nm->lower, addr, &ops);
}

static int nmbm_lower_panic_write_page(void *arg, uint64_t addr,
				       const void *buf)
{
	struct nmbm_mtd *nm = arg;
	size_t retlen;

	return mtd_panic_write(nm->lower, addr, nm->lower->writesize, &retlen,
			       buf);
}

static int nmbm_lower_erase_block(void *arg, uint64_t addr)
{
	struct nmbm_mtd *nm = arg;
	struct erase_info ei;

	memset(&ei, 0, sizeof(ei));

	ei.addr = addr;
	ei.len = nm->lower->erasesize;

	return mtd_erase(nm->lower, &ei);
}

static int nmbm_lower_is_bad_block(void *arg, uint64_t addr)
{
	struct nmbm_mtd *nm = arg;

	return mtd_block_isbad(nm->lower, addr);
}

static int nmbm_lower_mark_bad_block(void *arg, uint64_t addr)
{
	struct nmbm_mtd *nm = arg;

	return mtd_block_markbad(nm->lower, addr);
}

static void nmbm_lower_log(void *arg, enum nmbm_log_category level,
			   const char *fmt, va_list ap)
{
	struct nmbm_mtd *nm = arg;
	char *msg;
	char *kl;

	msg = kvasprintf(GFP_KERNEL, fmt, ap);
	if (!msg) {
		dev_warn(nm->dev, "unable to print log\n");
		return;
	}

	switch (level) {
	case NMBM_LOG_DEBUG:
		kl = KERN_DEBUG;
		break;
	case NMBM_LOG_WARN:
		kl = KERN_WARNING;
		break;
	case NMBM_LOG_ERR:
		kl = KERN_ERR;
		break;
	case NMBM_LOG_EMERG:
		kl = KERN_EMERG;
		break;
	default:
		kl = KERN_INFO;
	}

	dev_printk(kl, nm->dev, "%s", msg);

	kfree(msg);
}

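/*
 * Exclusive access to the lower device, following the usual MTD flash-chip
 * state machine: a caller sleeps until the device is FL_READY (or already
 * FL_PM_SUSPENDED when suspending), claims it by setting the new state, and
 * releases it by restoring FL_READY and waking any waiters.
 */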
static int nmbm_get_device(struct nmbm_mtd *nm, int new_state)
{
	DECLARE_WAITQUEUE(wait, current);

retry:
	spin_lock(&nm->lock);

	if (nm->state == FL_READY) {
		nm->state = new_state;
		spin_unlock(&nm->lock);
		return 0;
	}

	if (new_state == FL_PM_SUSPENDED) {
		if (nm->state == FL_PM_SUSPENDED) {
			spin_unlock(&nm->lock);
			return 0;
		}
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&nm->wq, &wait);
	spin_unlock(&nm->lock);
	schedule();
	remove_wait_queue(&nm->wq, &wait);
	goto retry;
}

static void nmbm_release_device(struct nmbm_mtd *nm)
{
	spin_lock(&nm->lock);
	nm->state = FL_READY;
	wake_up(&nm->wq);
	spin_unlock(&nm->lock);
}

static int nmbm_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	int ret;

	nmbm_get_device(nm, FL_ERASING);

	ret = nmbm_erase_block_range(nm->ni, instr->addr, instr->len,
				     &instr->fail_addr);

	nmbm_release_device(nm);

	if (!ret)
		return 0;

	return -EIO;
}

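/*
 * Read an arbitrary data/OOB range page by page through the bounce buffer
 * (nm->page_cache), keeping track of the maximum bitflip count and whether
 * any page suffered an uncorrectable ECC error.
 */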
static int nmbm_mtd_read_data(struct nmbm_mtd *nm, uint64_t addr,
			      struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
{
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool has_ecc_err = false;
	int ret, max_bitflips = 0;

	col = addr & nm->lower->writesize_mask;
	addr &= ~nm->lower->writesize_mask;
	maxooblen = mtd_oobavail(nm->lower, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? nm->page_cache : NULL;
	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		ret = nmbm_read_single_page(nm->ni, addr, datcache, oobcache,
					    mode);
		if (ret < 0 && ret != -EBADMSG)
			return ret;

		/* Continue reading on ecc error */
		if (ret == -EBADMSG)
			has_ecc_err = true;

		/* Record the maximum bitflips between pages */
		if (ret > max_bitflips)
			max_bitflips = ret;

		if (len) {
			/* Move data */
			chklen = nm->lower->writesize - col;
			if (chklen > len)
				chklen = len;

			memcpy(ops->datbuf + ops->retlen, datcache + col,
			       chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
			       chklen);
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		addr += nm->lower->writesize;
	}

	if (has_ecc_err)
		return -EBADMSG;

	return max_bitflips;
}

static int nmbm_mtd_read_oob(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	uint32_t maxooblen;
	enum nmbm_oob_mode mode;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
		mode = NMBM_MODE_PLACE_OOB;
		break;
	case MTD_OPS_AUTO_OOB:
		mode = NMBM_MODE_AUTO_OOB;
		break;
	case MTD_OPS_RAW:
		mode = NMBM_MODE_RAW;
		break;
	default:
		pr_debug("%s: unsupported oob mode: %u\n", __func__, ops->mode);
		return -ENOTSUPP;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow read past end of device */
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		pr_debug("%s: attempt to read beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	if (!ops->oobbuf) {
		nmbm_get_device(nm, FL_READING);

		/* Optimized for reading data only */
		ret = nmbm_read_range(nm->ni, from, ops->len, ops->datbuf,
				      mode, &ops->retlen);

		nmbm_release_device(nm);

		return ret;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		pr_debug("%s: attempt to start read outside oob\n",
			 __func__);
		return -EINVAL;
	}

	if (unlikely(from >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (from >> mtd->writesize_shift)) * maxooblen)) {
		pr_debug("%s: attempt to read beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	nmbm_get_device(nm, FL_READING);
	ret = nmbm_mtd_read_data(nm, from, ops, mode);
	nmbm_release_device(nm);

	return ret;
}

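/*
 * Write an arbitrary data/OOB range page by page through the bounce buffer,
 * padding partial pages with 0xff so bytes outside the requested range stay
 * unprogrammed.
 */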
static int nmbm_mtd_write_data(struct nmbm_mtd *nm, uint64_t addr,
			       struct mtd_oob_ops *ops, enum nmbm_oob_mode mode)
{
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	int ret;

	col = addr & nm->lower->writesize_mask;
	addr &= ~nm->lower->writesize_mask;
	maxooblen = mtd_oobavail(nm->lower, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? nm->page_cache : NULL;
	oobcache = ooblen ? nm->page_cache + nm->lower->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (len) {
			/* Move data */
			chklen = nm->lower->writesize - col;
			if (chklen > len)
				chklen = len;

			memset(datcache, 0xff, col);
			memcpy(datcache + col, ops->datbuf + ops->retlen,
			       chklen);
			memset(datcache + col + chklen, 0xff,
			       nm->lower->writesize - col - chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memset(oobcache, 0xff, ooboffs);
			memcpy(oobcache + ooboffs,
			       ops->oobbuf + ops->oobretlen, chklen);
			memset(oobcache + ooboffs + chklen, 0xff,
			       nm->lower->oobsize - ooboffs - chklen);
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
			ops->oobretlen += chklen;
		}

		ret = nmbm_write_single_page(nm->ni, addr, datcache, oobcache,
					     mode);
		if (ret)
			return ret;

		addr += nm->lower->writesize;
	}

	return 0;
}

static int nmbm_mtd_write_oob(struct mtd_info *mtd, loff_t to,
			      struct mtd_oob_ops *ops)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	enum nmbm_oob_mode mode;
	uint32_t maxooblen;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
		mode = NMBM_MODE_PLACE_OOB;
		break;
	case MTD_OPS_AUTO_OOB:
		mode = NMBM_MODE_AUTO_OOB;
		break;
	case MTD_OPS_RAW:
		mode = NMBM_MODE_RAW;
		break;
	default:
		pr_debug("%s: unsupported oob mode: %u\n", __func__,
			 ops->mode);
		return -ENOTSUPP;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		pr_debug("%s: attempt to write beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	if (!ops->oobbuf) {
		nmbm_get_device(nm, FL_WRITING);

		/* Optimized for writing data only */
		ret = nmbm_write_range(nm->ni, to, ops->len, ops->datbuf,
				       mode, &ops->retlen);

		nmbm_release_device(nm);

		return ret;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		pr_debug("%s: attempt to start write outside oob\n",
			 __func__);
		return -EINVAL;
	}

	if (unlikely(to >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (to >> mtd->writesize_shift)) * maxooblen)) {
		pr_debug("%s: attempt to write beyond end of device\n",
			 __func__);
		return -EINVAL;
	}

	nmbm_get_device(nm, FL_WRITING);
	ret = nmbm_mtd_write_data(nm, to, ops, mode);
	nmbm_release_device(nm);

	return ret;
}

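/*
 * Panic write path: called without locking; data is staged through the page
 * cache one page at a time and OOB data is not handled here.
 */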
static int nmbm_mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	size_t chklen, wrlen = 0;
	uint32_t col;
	int ret;

	col = to & nm->lower->writesize_mask;
	to &= ~nm->lower->writesize_mask;

	while (len) {
		/* Move data */
		chklen = nm->lower->writesize - col;
		if (chklen > len)
			chklen = len;

		if (chklen < nm->lower->writesize)
			memset(nm->page_cache, 0xff, nm->lower->writesize);
		memcpy(nm->page_cache + col, buf + wrlen, chklen);

		len -= chklen;
		col = 0; /* (col + chklen) % */
		wrlen += chklen;

		ret = nmbm_panic_write_single_page(nm->ni, to, nm->page_cache);
		if (ret)
			break;

		to += nm->lower->writesize;
	}

	if (retlen)
		*retlen = wrlen;

	return 0;
}

static int nmbm_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	int ret;

	nmbm_get_device(nm, FL_READING);
	ret = nmbm_check_bad_block(nm->ni, offs);
	nmbm_release_device(nm);

	return ret;
}

static int nmbm_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);
	int ret;

	nmbm_get_device(nm, FL_WRITING);
	ret = nmbm_mark_bad_block(nm->ni, offs);
	nmbm_release_device(nm);

	return ret;
}

static void nmbm_mtd_shutdown(struct mtd_info *mtd)
{
	struct nmbm_mtd *nm = container_of(mtd, struct nmbm_mtd, upper);

	nmbm_get_device(nm, FL_PM_SUSPENDED);
}

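/*
 * Illustrative device tree usage (a sketch only: the node name, phandle and
 * values below are made up, but the properties are those parsed in probe):
 *
 *	nmbm {
 *		compatible = "generic,nmbm";
 *		lower-mtd-device = <&nand_flash>;
 *		forced-create;
 *		empty-page-ecc-protected;
 *		max-ratio = <1>;
 *		max-reserved-blocks = <256>;
 *	};
 */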
static int nmbm_probe(struct platform_device *pdev)
{
	struct device_node *mtd_np, *np = pdev->dev.of_node;
	uint32_t max_ratio, max_reserved_blocks, alloc_size;
	bool forced_create, empty_page_ecc_ok;
	struct nmbm_lower_device nld;
	struct mtd_info *lower, *mtd;
	struct nmbm_mtd *nm;
	const char *mtdname;
	int ret;

	mtd_np = of_parse_phandle(np, "lower-mtd-device", 0);
	if (mtd_np) {
		lower = get_mtd_device_by_node(mtd_np);
		if (!IS_ERR(lower))
			goto do_attach_mtd;

		dev_dbg(&pdev->dev, "failed to find mtd device by phandle\n");
		return -EPROBE_DEFER;
	}

	ret = of_property_read_string(np, "lower-mtd-name", &mtdname);
	if (!ret) {
		lower = get_mtd_device_nm(mtdname);
		if (!IS_ERR(lower))
			goto do_attach_mtd;

		dev_dbg(&pdev->dev, "failed to find mtd device by name '%s'\n",
			mtdname);
		return -EPROBE_DEFER;
	}

	/* Neither "lower-mtd-device" nor "lower-mtd-name" was specified */
	dev_err(&pdev->dev, "no lower mtd device specified\n");
	return -ENODEV;

do_attach_mtd:
	if (of_property_read_u32(np, "max-ratio", &max_ratio))
		max_ratio = NMBM_MAX_RATIO_DEFAULT;

	if (of_property_read_u32(np, "max-reserved-blocks",
				 &max_reserved_blocks))
		max_reserved_blocks = NMBM_MAX_BLOCKS_DEFAULT;

	forced_create = of_property_read_bool(np, "forced-create");
	empty_page_ecc_ok = of_property_read_bool(np,
						  "empty-page-ecc-protected");

	memset(&nld, 0, sizeof(nld));

	nld.flags = 0;

	if (forced_create)
		nld.flags |= NMBM_F_CREATE;

	if (empty_page_ecc_ok)
		nld.flags |= NMBM_F_EMPTY_PAGE_ECC_OK;

	nld.max_ratio = max_ratio;
	nld.max_reserved_blocks = max_reserved_blocks;

	nld.size = lower->size;
	nld.erasesize = lower->erasesize;
	nld.writesize = lower->writesize;
	nld.oobsize = lower->oobsize;
	nld.oobavail = lower->oobavail;

	nld.read_page = nmbm_lower_read_page;
	nld.write_page = nmbm_lower_write_page;
	nld.panic_write_page = nmbm_lower_panic_write_page;
	nld.erase_block = nmbm_lower_erase_block;
	nld.is_bad_block = nmbm_lower_is_bad_block;
	nld.mark_bad_block = nmbm_lower_mark_bad_block;

	nld.logprint = nmbm_lower_log;

	alloc_size = nmbm_calc_structure_size(&nld);

	nm = devm_kzalloc(&pdev->dev, sizeof(*nm) + alloc_size +
			  lower->writesize + lower->oobsize, GFP_KERNEL);
	if (!nm) {
		ret = -ENOMEM;
		goto out;
	}

	nm->ni = (void *)nm + sizeof(*nm);
	nm->page_cache = (uint8_t *)nm->ni + alloc_size;
	nm->lower = lower;
	nm->dev = &pdev->dev;

	INIT_LIST_HEAD(&nm->node);
	spin_lock_init(&nm->lock);
	init_waitqueue_head(&nm->wq);

	nld.arg = nm;

	ret = nmbm_attach(&nld, nm->ni);
	if (ret)
		goto out;

	/* Initialize upper mtd */
	mtd = &nm->upper;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;
	mtd->type = lower->type;
	mtd->flags = lower->flags;

	mtd->size = (uint64_t)nm->ni->data_block_count * lower->erasesize;
	mtd->erasesize = lower->erasesize;
	mtd->writesize = lower->writesize;
	mtd->writebufsize = lower->writesize;
	mtd->oobsize = lower->oobsize;
	mtd->oobavail = lower->oobavail;

	mtd->erasesize_shift = lower->erasesize_shift;
	mtd->writesize_shift = lower->writesize_shift;
	mtd->erasesize_mask = lower->erasesize_mask;
	mtd->writesize_mask = lower->writesize_mask;

	mtd->bitflip_threshold = lower->bitflip_threshold;

	mtd->ooblayout = lower->ooblayout;

	mtd->ecc_step_size = lower->ecc_step_size;
	mtd->ecc_strength = lower->ecc_strength;

	mtd->numeraseregions = lower->numeraseregions;
	mtd->eraseregions = lower->eraseregions;

	mtd->_erase = nmbm_mtd_erase;
	mtd->_read_oob = nmbm_mtd_read_oob;
	mtd->_write_oob = nmbm_mtd_write_oob;
	mtd->_panic_write = nmbm_mtd_panic_write;
	mtd->_block_isbad = nmbm_mtd_block_isbad;
	mtd->_block_markbad = nmbm_mtd_block_markbad;
	mtd->_reboot = nmbm_mtd_shutdown;

	mtd_set_of_node(mtd, np);

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(&pdev->dev, "failed to register mtd device\n");
		nmbm_detach(nm->ni);
		goto out;
	}

	platform_set_drvdata(pdev, nm);

	mutex_lock(&nmbm_devs_lock);
	list_add_tail(&nm->node, &nmbm_devs);
	mutex_unlock(&nmbm_devs_lock);

	return 0;

out:
	if (nm)
		devm_kfree(&pdev->dev, nm);

	put_mtd_device(lower);

	return ret;
}

static int nmbm_remove(struct platform_device *pdev)
{
	struct nmbm_mtd *nm = platform_get_drvdata(pdev);
	struct mtd_info *lower = nm->lower;
	int ret;

	ret = mtd_device_unregister(&nm->upper);
	if (ret)
		return ret;

	nmbm_detach(nm->ni);

	/* Remove this instance from the global device list */
	mutex_lock(&nmbm_devs_lock);
	list_del(&nm->node);
	mutex_unlock(&nmbm_devs_lock);

	devm_kfree(&pdev->dev, nm);

	put_mtd_device(lower);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id nmbm_ids[] = {
	{ .compatible = "generic,nmbm" },
	{ },
};

MODULE_DEVICE_TABLE(of, nmbm_ids);

static struct platform_driver nmbm_driver = {
	.probe = nmbm_probe,
	.remove = nmbm_remove,
	.driver = {
		.name = "nmbm",
		.of_match_table = nmbm_ids,
	},
};

static int __init nmbm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&nmbm_devs);

	ret = platform_driver_register(&nmbm_driver);
	if (ret) {
		pr_err("failed to register nmbm driver\n");
		return ret;
	}

	return 0;
}
module_init(nmbm_init);

static void __exit nmbm_exit(void)
{
	platform_driver_unregister(&nmbm_driver);
}
module_exit(nmbm_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
MODULE_DESCRIPTION("NAND mapping block management");