// SPDX-License-Identifier: GPL-2.0+
/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 */

#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>
#include <asm/div64.h>
#else
#include <div64.h>
#include <linux/compat.h>
#endif

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <ubi_uboot.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects;
 * the array is allocated along with this structure.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
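
/*
 * Layout sketch (illustrative, not part of the driver): for
 * num_subdev == 2, the single allocation sized by
 * SIZEOF_STRUCT_MTD_CONCAT(2) looks like
 *
 *	+-------------------+-----------+-----------+
 *	| struct mtd_concat | subdev[0] | subdev[1] |
 *	+-------------------+-----------+-----------+
 *	                    ^
 *	                    concat->subdev == (struct mtd_info **)(concat + 1)
 *
 * so mtd_concat_create() can set up the pointer array without a
 * second allocation.
 */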

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 * (The cast is valid because the mtd member is the structure's
 * first field.)
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
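
/*
 * Worked example (numbers are illustrative, not from this file): with
 * two 1 MiB subdevices, a 64 KiB read at offset 0x180000 skips
 * subdev 0 (the offset is reduced by its 1 MiB size) and is served
 * entirely by subdev 1 at local offset 0x80000; a request straddling
 * the boundary is split, each piece issued at its local offset.
 */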

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

#ifdef __UBOOT__
	*retlen = 0;
#endif

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

#ifndef __UBOOT__
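/*
 * Sketch of the kvec splitting done below (illustrative numbers, not
 * from this file): writing three 4 KiB vectors when only 10 KiB remain
 * in the current subdevice, the first mtd_writev() call is given
 * vecs 0-2 with vec 2 temporarily shortened to 2 KiB so the write ends
 * exactly at the subdevice boundary; vec 2's remaining 2 KiB are then
 * replayed to the next subdevice at offset 0.
 */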
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size;	/* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
#endif

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	/* Nothing to do here in U-Boot */
#ifndef __UBOOT__
	wake_up((wait_queue_head_t *) instr->priv);
#endif
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}
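
	/*
	 * Worked example of the check above (illustrative numbers): if
	 * region 0 covers the first 1 MiB with 64 KiB blocks and region 1
	 * the next 1 MiB with 128 KiB blocks, an erase starting at
	 * 0x90000 passes (64 KiB-aligned within region 0), but one ending
	 * at 0x110000 fails, because 0x110000 is not 128 KiB-aligned
	 * within region 1.
	 */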

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

#ifndef __UBOOT__
static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}
#endif

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		return mtd_get_unmapped_area(subdev, len, offset, flags);
	}

	return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success (NULL on failure). This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
#ifndef __UBOOT__
				   const char *name)	/* name for the new device */
#else
				   char *name)	/* name for the new device */
#endif
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	debug("Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	debug("into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
#ifndef __UBOOT__
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
#endif
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

#ifndef __UBOOT__
	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
#endif

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

#ifndef __UBOOT__
		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
			    &default_backing_dev_info;
#endif

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
#ifndef __UBOOT__
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;
#endif
	concat->mtd._get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
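	/*
	 * Illustration (hypothetical devices): concatenating a chip with
	 * uniform 64 KiB blocks and one with uniform 128 KiB blocks gives
	 * num_erase_region == 2 and max_erasesize == 128 KiB, so the super
	 * device is described by two mtd_erase_region_info entries below.
	 */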
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}
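
/*
 * Usage sketch (illustrative only; mtd0, mtd1 and the add_mtd_device()
 * registration step are assumptions, not part of this file):
 *
 *	struct mtd_info *parts[2] = { mtd0, mtd1 };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(parts, 2, "nor-concat");
 *	if (combined)
 *		add_mtd_device(combined);
 *	...
 *	mtd_concat_destroy(combined);
 */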

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");