blob: 1a4dec34d93ebb85c5a169408d966b0943393488 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Kyungmin Parkf6d5e252008-11-19 16:20:36 +01002/*
3 * Core registration and callback routines for MTD
4 * drivers and users.
5 *
Heiko Schocherf5895d12014-06-24 10:10:04 +02006 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7 * Copyright © 2006 Red Hat UK Limited
8 *
Kyungmin Parkf6d5e252008-11-19 16:20:36 +01009 */
10
Heiko Schocherf5895d12014-06-24 10:10:04 +020011#ifndef __UBOOT__
Simon Glass0f2af882020-05-10 11:40:05 -060012#include <log.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070013#include <dm/devres.h>
Heiko Schocherf5895d12014-06-24 10:10:04 +020014#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/ptrace.h>
17#include <linux/seq_file.h>
18#include <linux/string.h>
19#include <linux/timer.h>
20#include <linux/major.h>
21#include <linux/fs.h>
22#include <linux/err.h>
23#include <linux/ioctl.h>
24#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/idr.h>
27#include <linux/backing-dev.h>
28#include <linux/gfp.h>
29#include <linux/slab.h>
30#else
Simon Glass4dcacfc2020-05-10 11:40:13 -060031#include <linux/bitops.h>
Simon Glassc06c1be2020-05-10 11:40:08 -060032#include <linux/bug.h>
Heiko Schocherf5895d12014-06-24 10:10:04 +020033#include <linux/err.h>
Kyungmin Parkf6d5e252008-11-19 16:20:36 +010034#include <ubi_uboot.h>
Heiko Schocherf5895d12014-06-24 10:10:04 +020035#endif
36
Fabio Estevam0297d1e2015-11-05 12:43:39 -020037#include <linux/log2.h>
Heiko Schocherf5895d12014-06-24 10:10:04 +020038#include <linux/mtd/mtd.h>
39#include <linux/mtd/partitions.h>
40
41#include "mtdcore.h"
42
#ifndef __UBOOT__
/*
 * backing device capabilities for non-mappable devices (such as NAND flash)
 * - permits private mappings, copies are taken of the data
 */
static struct backing_dev_info mtd_bdi_unmappable = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

/*
 * backing device capabilities for R/O mappable devices (such as ROM)
 * - permits private mappings, copies are taken of the data
 * - permits non-writable shared mappings
 */
static struct backing_dev_info mtd_bdi_ro_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};

/*
 * backing device capabilities for writable mappable devices (such as RAM)
 * - permits private mappings, copies are taken of the data
 * - permits non-writable shared mappings
 */
static struct backing_dev_info mtd_bdi_rw_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
			   BDI_CAP_WRITE_MAP),
};

/* Forward declarations: PM callbacks wired into mtd_class below. */
static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

/* The "mtd" device class: every registered MTD shows up under it. */
static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.suspend = mtd_cls_suspend,
	.resume = mtd_cls_resume,
};
#else
#else
/*
 * Minimal fixed-size emulation of the Linux IDR (ID allocator) API for
 * U-Boot: a flat array of MAX_IDR_ID slots, each holding a used flag and
 * an opaque pointer.
 */
#define MAX_IDR_ID	64

struct idr_layer {
	int	used;	/* non-zero when this slot holds a valid pointer */
	void	*ptr;	/* the registered object (a struct mtd_info here) */
};

struct idr {
	struct idr_layer id[MAX_IDR_ID];
	bool updated;	/* set whenever the table changes; see mtd_dev_list_updated() */
};

#define DEFINE_IDR(name)	struct idr name;
96
97void idr_remove(struct idr *idp, int id)
98{
Boris Brezillon059e4a62018-12-02 10:54:22 +010099 if (idp->id[id].used) {
Heiko Schocherf5895d12014-06-24 10:10:04 +0200100 idp->id[id].used = 0;
Boris Brezillon059e4a62018-12-02 10:54:22 +0100101 idp->updated = true;
102 }
Heiko Schocherf5895d12014-06-24 10:10:04 +0200103
104 return;
105}
106void *idr_find(struct idr *idp, int id)
107{
108 if (idp->id[id].used)
109 return idp->id[id].ptr;
110
111 return NULL;
112}
113
114void *idr_get_next(struct idr *idp, int *next)
115{
116 void *ret;
117 int id = *next;
118
119 ret = idr_find(idp, id);
120 if (ret) {
121 id ++;
122 if (!idp->id[id].used)
123 id = 0;
124 *next = id;
125 } else {
126 *next = 0;
127 }
128
129 return ret;
130}
131
132int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask)
133{
134 struct idr_layer *idl;
135 int i = 0;
136
137 while (i < MAX_IDR_ID) {
138 idl = &idp->id[i];
139 if (idl->used == 0) {
140 idl->used = 1;
141 idl->ptr = ptr;
Boris Brezillon059e4a62018-12-02 10:54:22 +0100142 idp->updated = true;
Heiko Schocherf5895d12014-06-24 10:10:04 +0200143 return i;
144 }
145 i++;
146 }
147 return -ENOSPC;
148}
149#endif
150
/* Global registry mapping MTD indices to struct mtd_info pointers. */
static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);
157
158struct mtd_info *__mtd_next_device(int i)
159{
160 return idr_get_next(&mtd_idr, &i);
161}
162EXPORT_SYMBOL_GPL(__mtd_next_device);
163
Boris Brezillon059e4a62018-12-02 10:54:22 +0100164bool mtd_dev_list_updated(void)
165{
166 if (mtd_idr.updated) {
167 mtd_idr.updated = false;
168 return true;
169 }
170
171 return false;
172}
173
#ifndef __UBOOT__
/* List of struct mtd_notifier callbacks registered via register_mtd_user(). */
static LIST_HEAD(mtd_notifiers);


/* Character-device number for MTD index: two minors per device (rw + ro). */
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node if needed */
	if (index)
		device_destroy(&mtd_class, index + 1);
}

/* Class PM hook: forward a suspend request to the MTD driver, if bound. */
static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

/* Class PM hook: forward a resume request to the MTD driver, if bound. */
static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}
208
/* sysfs: human-readable flash type of the device. */
static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

/* sysfs: MTD flags bitmask in hexadecimal. */
static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);

}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

/* sysfs: total device size in bytes. */
static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);

}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

/* sysfs: erase block size in bytes. */
static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);

}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

/* sysfs: minimal write unit in bytes. */
static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);

}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

/* sysfs: sub-page write unit, derived from writesize >> subpage_sft. */
static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);

}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

/* sysfs: out-of-band area size per page, in bytes. */
static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);

}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

/* sysfs: number of distinct erase regions (0 for uniform devices). */
static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);

}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
	NULL);

/* sysfs: driver-assigned device name. */
static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);

}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

/* sysfs: maximum number of bit errors correctable per ECC step. */
static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

/* sysfs (read): bitflip count at which mtd_read() reports -EUCLEAN. */
static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

/* sysfs (write): let userspace tune the bitflip warning threshold. */
static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

/* sysfs: number of data bytes covered by a single ECC step. */
static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);

}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

/* All attributes exposed for every MTD device node. */
static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

/* Device type tying the attribute groups and release hook together. */
static struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};
400#endif
401
/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or 1 on failure, which currently will only happen
 * if there is insufficient memory or a sysfs error.
 */

int add_mtd_device(struct mtd_info *mtd)
{
#ifndef __UBOOT__
	struct mtd_notifier *not;
#endif
	int i, error;

#ifndef __UBOOT__
	/* Pick a default backing_dev_info matching the device's mappability. */
	if (!mtd->backing_dev_info) {
		switch (mtd->type) {
		case MTD_RAM:
			mtd->backing_dev_info = &mtd_bdi_rw_mappable;
			break;
		case MTD_ROM:
			mtd->backing_dev_info = &mtd_bdi_ro_mappable;
			break;
		default:
			mtd->backing_dev_info = &mtd_bdi_unmappable;
			break;
		}
	}
#endif

	/* A zero writesize would break the shift/mask math below. */
	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	/* Allocate the next free index; becomes the "mtdN" number. */
	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0)
		goto fail_locked;

	mtd->index = i;
	mtd->usecount = 0;

	INIT_LIST_HEAD(&mtd->partitions);

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	/* Precompute shift/mask helpers for power-of-2 geometries. */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		/* Non-fatal: warn but keep the device registered. */
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
	}

#ifndef __UBOOT__
	/* Caller should have set dev.parent to match the
	 * physical device.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	if (device_register(&mtd->dev) != 0)
		goto fail_added;

	/* Also create the read-only /dev/mtdNro companion node. */
	if (MTD_DEVT(i))
		device_create(&mtd_class, mtd->dev.parent,
			      MTD_DEVT(i) + 1,
			      NULL, "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);
#else
	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
#endif

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

#ifndef __UBOOT__
fail_added:
	idr_remove(&mtd_idr, i);
#endif
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return 1;
}
515
/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure
 * (-ENODEV if the device is not registered, -EBUSY if it is still in
 * use, or the error from deleting its partitions).
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
#ifndef __UBOOT__
	struct mtd_notifier *not;
#endif

	/* Child partitions must be torn down before the master device. */
	ret = del_mtd_partitions(mtd);
	if (ret) {
		debug("Failed to delete MTD partitions attached to %s (err %d)\n",
		      mtd->name, ret);
		return ret;
	}

	mutex_lock(&mtd_table_mutex);

	/* Verify the device is actually the one registered at its index. */
	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

#ifndef __UBOOT__
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);
#endif

	/* Refuse to remove a device that still has users. */
	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
#ifndef __UBOOT__
		device_unregister(&mtd->dev);
#endif

		idr_remove(&mtd_idr, mtd->index);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
Heiko Schocherf5895d12014-06-24 10:10:04 +0200573
574#ifndef __UBOOT__
/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * It first tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this functions tries to fallback to information specified in
 *   @parts/@nr_parts.
 * * If any partitioning info was found, this function registers the found
 *   partitions.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int err;
	struct mtd_partition *real_parts;

	/* err > 0: number of partitions found; <= 0: none (or error). */
	err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
	if (err <= 0 && nr_parts && parts) {
		/* Fall back to the caller-supplied partition table. */
		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
				     GFP_KERNEL);
		if (!real_parts)
			err = -ENOMEM;
		else
			err = nr_parts;
	}

	if (err > 0) {
		/* add_mtd_partitions() copies; we own and free real_parts. */
		err = add_mtd_partitions(mtd, real_parts, err);
		kfree(real_parts);
	} else if (err == 0) {
		err = add_mtd_device(mtd);
		/* add_mtd_device() reports failure as 1, not an errno. */
		if (err == 1)
			err = -ENODEV;
	}

	return err;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
633
634/**
635 * mtd_device_unregister - unregister an existing MTD device.
636 *
637 * @master: the MTD device to unregister. This will unregister both the master
638 * and any partitions if registered.
639 */
640int mtd_device_unregister(struct mtd_info *master)
641{
642 int err;
643
644 err = del_mtd_partitions(master);
645 if (err)
646 return err;
647
648 if (!device_is_registered(&master->dev))
649 return 0;
650
651 return del_mtd_device(master);
652}
653EXPORT_SYMBOL_GPL(mtd_device_unregister);
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100654
655/**
Heiko Schocherf5895d12014-06-24 10:10:04 +0200656 * register_mtd_user - register a 'user' of MTD devices.
657 * @new: pointer to notifier info structure
658 *
659 * Registers a pair of callbacks function to be called upon addition
660 * or removal of MTD devices. Causes the 'add' callback to be immediately
661 * invoked for each MTD device currently present in the system.
662 */
663void register_mtd_user (struct mtd_notifier *new)
664{
665 struct mtd_info *mtd;
666
667 mutex_lock(&mtd_table_mutex);
668
669 list_add(&new->list, &mtd_notifiers);
670
671 __module_get(THIS_MODULE);
672
673 mtd_for_each_device(mtd)
674 new->add(mtd);
675
676 mutex_unlock(&mtd_table_mutex);
677}
678EXPORT_SYMBOL_GPL(register_mtd_user);
679
680/**
681 * unregister_mtd_user - unregister a 'user' of MTD devices.
682 * @old: pointer to notifier info structure
683 *
684 * Removes a callback function pair from the list of 'users' to be
685 * notified upon addition or removal of MTD devices. Causes the
686 * 'remove' callback to be immediately invoked for each MTD device
687 * currently present in the system.
688 */
689int unregister_mtd_user (struct mtd_notifier *old)
690{
691 struct mtd_info *mtd;
692
693 mutex_lock(&mtd_table_mutex);
694
695 module_put(THIS_MODULE);
696
697 mtd_for_each_device(mtd)
698 old->remove(mtd);
699
700 list_del(&old->list);
701 mutex_unlock(&mtd_table_mutex);
702 return 0;
703}
704EXPORT_SYMBOL_GPL(unregister_mtd_user);
705#endif
706
707/**
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100708 * get_mtd_device - obtain a validated handle for an MTD device
709 * @mtd: last known address of the required MTD device
710 * @num: internal device number of the required MTD device
711 *
712 * Given a number and NULL address, return the num'th entry in the device
Heiko Schocherf5895d12014-06-24 10:10:04 +0200713 * table, if any. Given an address and num == -1, search the device table
714 * for a device with that address and return if it's still present. Given
715 * both, return the num'th driver only if its address matches. Return
716 * error code if not.
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100717 */
718struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
719{
Heiko Schocherf5895d12014-06-24 10:10:04 +0200720 struct mtd_info *ret = NULL, *other;
721 int err = -ENODEV;
722
723 mutex_lock(&mtd_table_mutex);
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100724
725 if (num == -1) {
Heiko Schocherf5895d12014-06-24 10:10:04 +0200726 mtd_for_each_device(other) {
727 if (other == mtd) {
728 ret = mtd;
729 break;
730 }
731 }
732 } else if (num >= 0) {
733 ret = idr_find(&mtd_idr, num);
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100734 if (mtd && mtd != ret)
735 ret = NULL;
736 }
737
Heiko Schocherf5895d12014-06-24 10:10:04 +0200738 if (!ret) {
739 ret = ERR_PTR(err);
740 goto out;
741 }
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100742
Heiko Schocherf5895d12014-06-24 10:10:04 +0200743 err = __get_mtd_device(ret);
744 if (err)
745 ret = ERR_PTR(err);
746out:
747 mutex_unlock(&mtd_table_mutex);
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100748 return ret;
Heiko Schocherf5895d12014-06-24 10:10:04 +0200749}
750EXPORT_SYMBOL_GPL(get_mtd_device);
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100751
Heiko Schocherf5895d12014-06-24 10:10:04 +0200752
753int __get_mtd_device(struct mtd_info *mtd)
754{
755 int err;
756
757 if (!try_module_get(mtd->owner))
758 return -ENODEV;
759
760 if (mtd->_get_device) {
761 err = mtd->_get_device(mtd);
762
763 if (err) {
764 module_put(mtd->owner);
765 return err;
766 }
767 }
768 mtd->usecount++;
769 return 0;
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100770}
Heiko Schocherf5895d12014-06-24 10:10:04 +0200771EXPORT_SYMBOL_GPL(__get_mtd_device);
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100772
773/**
Heiko Schocherf5895d12014-06-24 10:10:04 +0200774 * get_mtd_device_nm - obtain a validated handle for an MTD device by
775 * device name
776 * @name: MTD device name to open
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100777 *
Heiko Schocherf5895d12014-06-24 10:10:04 +0200778 * This function returns MTD device description structure in case of
779 * success and an error code in case of failure.
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100780 */
781struct mtd_info *get_mtd_device_nm(const char *name)
782{
Heiko Schocherf5895d12014-06-24 10:10:04 +0200783 int err = -ENODEV;
784 struct mtd_info *mtd = NULL, *other;
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100785
Heiko Schocherf5895d12014-06-24 10:10:04 +0200786 mutex_lock(&mtd_table_mutex);
787
788 mtd_for_each_device(other) {
789 if (!strcmp(name, other->name)) {
790 mtd = other;
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100791 break;
792 }
793 }
794
795 if (!mtd)
796 goto out_unlock;
797
Heiko Schocherf5895d12014-06-24 10:10:04 +0200798 err = __get_mtd_device(mtd);
799 if (err)
800 goto out_unlock;
801
802 mutex_unlock(&mtd_table_mutex);
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100803 return mtd;
804
805out_unlock:
Heiko Schocherf5895d12014-06-24 10:10:04 +0200806 mutex_unlock(&mtd_table_mutex);
Kyungmin Parkf6d5e252008-11-19 16:20:36 +0100807 return ERR_PTR(err);
808}
Heiko Schocherf5895d12014-06-24 10:10:04 +0200809EXPORT_SYMBOL_GPL(get_mtd_device_nm);
Ben Gardiner50bae732010-08-31 17:48:01 -0400810
811#if defined(CONFIG_CMD_MTDPARTS_SPREAD)
812/**
813 * mtd_get_len_incl_bad
814 *
815 * Check if length including bad blocks fits into device.
816 *
817 * @param mtd an MTD device
818 * @param offset offset in flash
819 * @param length image length
820 * @return image length including bad blocks in *len_incl_bad and whether or not
821 * the length returned was truncated in *truncated
822 */
823void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
824 const uint64_t length, uint64_t *len_incl_bad,
825 int *truncated)
826{
827 *truncated = 0;
828 *len_incl_bad = 0;
829
maxin.john@enea.comb5ee6e22014-09-08 19:04:16 +0200830 if (!mtd->_block_isbad) {
Ben Gardiner50bae732010-08-31 17:48:01 -0400831 *len_incl_bad = length;
832 return;
833 }
834
835 uint64_t len_excl_bad = 0;
836 uint64_t block_len;
837
838 while (len_excl_bad < length) {
Scott Wood10390ce2010-09-09 15:40:03 -0500839 if (offset >= mtd->size) {
840 *truncated = 1;
841 return;
842 }
843
Ben Gardiner50bae732010-08-31 17:48:01 -0400844 block_len = mtd->erasesize - (offset & (mtd->erasesize - 1));
845
maxin.john@enea.comb5ee6e22014-09-08 19:04:16 +0200846 if (!mtd->_block_isbad(mtd, offset & ~(mtd->erasesize - 1)))
Ben Gardiner50bae732010-08-31 17:48:01 -0400847 len_excl_bad += block_len;
848
849 *len_incl_bad += block_len;
850 offset += block_len;
Ben Gardiner50bae732010-08-31 17:48:01 -0400851 }
852}
853#endif /* defined(CONFIG_CMD_MTDPARTS_SPREAD) */
Sergey Lapin3a38a552013-01-14 03:46:50 +0000854
Heiko Schocherf5895d12014-06-24 10:10:04 +0200855void put_mtd_device(struct mtd_info *mtd)
856{
857 mutex_lock(&mtd_table_mutex);
858 __put_mtd_device(mtd);
859 mutex_unlock(&mtd_table_mutex);
860
861}
862EXPORT_SYMBOL_GPL(put_mtd_device);
863
864void __put_mtd_device(struct mtd_info *mtd)
865{
866 --mtd->usecount;
867 BUG_ON(mtd->usecount < 0);
868
869 if (mtd->_put_device)
870 mtd->_put_device(mtd);
871
872 module_put(mtd->owner);
873}
874EXPORT_SYMBOL_GPL(__put_mtd_device);
875
876/*
Sergey Lapin3a38a552013-01-14 03:46:50 +0000877 * Erase is an asynchronous operation. Device drivers are supposed
878 * to call instr->callback() whenever the operation completes, even
879 * if it completes with a failure.
880 * Callers are supposed to pass a callback function and wait for it
881 * to be called before writing to the block.
882 */
883int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
884{
885 if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
886 return -EINVAL;
887 if (!(mtd->flags & MTD_WRITEABLE))
888 return -EROFS;
889 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
890 if (!instr->len) {
891 instr->state = MTD_ERASE_DONE;
892 mtd_erase_callback(instr);
893 return 0;
894 }
895 return mtd->_erase(mtd, instr);
896}
Heiko Schocherf5895d12014-06-24 10:10:04 +0200897EXPORT_SYMBOL_GPL(mtd_erase);
898
899#ifndef __UBOOT__
900/*
901 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
902 */
903int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
904 void **virt, resource_size_t *phys)
905{
906 *retlen = 0;
907 *virt = NULL;
908 if (phys)
909 *phys = 0;
910 if (!mtd->_point)
911 return -EOPNOTSUPP;
912 if (from < 0 || from > mtd->size || len > mtd->size - from)
913 return -EINVAL;
914 if (!len)
915 return 0;
916 return mtd->_point(mtd, from, len, retlen, virt, phys);
917}
918EXPORT_SYMBOL_GPL(mtd_point);
919
920/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
921int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
922{
923 if (!mtd->_point)
924 return -EOPNOTSUPP;
925 if (from < 0 || from > mtd->size || len > mtd->size - from)
926 return -EINVAL;
927 if (!len)
928 return 0;
929 return mtd->_unpoint(mtd, from, len);
930}
931EXPORT_SYMBOL_GPL(mtd_unpoint);
932#endif
933
934/*
935 * Allow NOMMU mmap() to directly map the device (if not NULL)
936 * - return the address to which the offset maps
937 * - return -ENOSYS to indicate refusal to do the mapping
938 */
939unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
940 unsigned long offset, unsigned long flags)
941{
942 if (!mtd->_get_unmapped_area)
943 return -EOPNOTSUPP;
944 if (offset > mtd->size || len > mtd->size - offset)
945 return -EINVAL;
946 return mtd->_get_unmapped_area(mtd, len, offset, flags);
947}
948EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
Sergey Lapin3a38a552013-01-14 03:46:50 +0000949
950int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
951 u_char *buf)
952{
Paul Burton700a76c2013-09-04 15:16:56 +0100953 int ret_code;
Heiko Schocherf5895d12014-06-24 10:10:04 +0200954 *retlen = 0;
Sergey Lapin3a38a552013-01-14 03:46:50 +0000955 if (from < 0 || from > mtd->size || len > mtd->size - from)
956 return -EINVAL;
957 if (!len)
958 return 0;
Paul Burton700a76c2013-09-04 15:16:56 +0100959
960 /*
961 * In the absence of an error, drivers return a non-negative integer
962 * representing the maximum number of bitflips that were corrected on
963 * any one ecc region (if applicable; zero otherwise).
964 */
Boris Brezillon6c20df72018-08-16 17:29:59 +0200965 if (mtd->_read) {
966 ret_code = mtd->_read(mtd, from, len, retlen, buf);
967 } else if (mtd->_read_oob) {
968 struct mtd_oob_ops ops = {
969 .len = len,
970 .datbuf = buf,
971 };
972
973 ret_code = mtd->_read_oob(mtd, from, &ops);
974 *retlen = ops.retlen;
975 } else {
976 return -ENOTSUPP;
977 }
978
Paul Burton700a76c2013-09-04 15:16:56 +0100979 if (unlikely(ret_code < 0))
980 return ret_code;
981 if (mtd->ecc_strength == 0)
982 return 0; /* device lacks ecc */
983 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
Sergey Lapin3a38a552013-01-14 03:46:50 +0000984}
Heiko Schocherf5895d12014-06-24 10:10:04 +0200985EXPORT_SYMBOL_GPL(mtd_read);
Sergey Lapin3a38a552013-01-14 03:46:50 +0000986
/**
 * mtd_write - write a caller buffer to a flash offset
 * @mtd:	MTD device description object
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @retlen:	set to the number of bytes actually written
 * @buf:	source buffer
 *
 * Returns 0 on success, -EINVAL for an out-of-range request, -EROFS when
 * the device is not writeable or no write method exists, or a negative
 * error code from the driver.
 */
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if ((!mtd->_write && !mtd->_write_oob) ||
	    !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;

	/* No plain write method: fall back to the OOB path, data area only */
	if (!mtd->_write) {
		struct mtd_oob_ops ops = {
			.len = len,
			.datbuf = (u8 *)buf,
		};
		int ret;

		ret = mtd->_write_oob(mtd, to, &ops);
		*retlen = ops.retlen;
		return ret;
	}

	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001014
/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when its
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001037
/**
 * mtd_check_oob_ops - sanitize and validate an OOB operation request
 * @mtd:	MTD device description object
 * @offs:	device offset the operation starts at
 * @ops:	operation descriptor (lengths/buffers are normalized in place)
 *
 * Returns 0 when the request fits the device, -EINVAL otherwise.
 */
static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
			     struct mtd_oob_ops *ops)
{
	/*
	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
	 * this case.
	 */
	if (!ops->datbuf)
		ops->len = 0;

	if (!ops->oobbuf)
		ops->ooblen = 0;

	if (offs < 0 || offs + ops->len > mtd->size)
		return -EINVAL;

	if (ops->ooblen) {
		size_t maxooblen;

		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
			return -EINVAL;

		/*
		 * OOB available from @offs to the end of the device, minus
		 * the starting offset inside the first page's OOB area.
		 */
		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
				      mtd_div_by_ws(offs, mtd)) *
			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
		if (ops->ooblen > maxooblen)
			return -EINVAL;
	}

	return 0;
}
1070
/**
 * mtd_read_oob - read data and/or out-of-band data
 * @mtd:	MTD device description object
 * @from:	offset to read from
 * @ops:	operation descriptor; ->retlen/->oobretlen are updated
 *
 * Returns 0 on success, -EUCLEAN when the bitflip threshold was reached,
 * or a negative error code.
 */
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;
	ops->retlen = ops->oobretlen = 0;

	ret_code = mtd_check_oob_ops(mtd, from, ops);
	if (ret_code)
		return ret_code;

	/* Check the validity of a potential fallback on mtd->_read */
	if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->_read_oob)
		ret_code = mtd->_read_oob(mtd, from, ops);
	else
		ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
				      ops->datbuf);

	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001103
/**
 * mtd_write_oob - write data and/or out-of-band data
 * @mtd:	MTD device description object
 * @to:		offset to write to
 * @ops:	operation descriptor; ->retlen/->oobretlen are updated
 *
 * Returns 0 on success or a negative error code.
 */
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		  struct mtd_oob_ops *ops)
{
	int ret;

	ops->retlen = ops->oobretlen = 0;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ret = mtd_check_oob_ops(mtd, to, ops);
	if (ret)
		return ret;

	/* Check the validity of a potential fallback on mtd->_write */
	if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
		return -EOPNOTSUPP;

	if (mtd->_write_oob)
		return mtd->_write_oob(mtd, to, ops);
	else
		return mtd->_write(mtd, to, ops->len, &ops->retlen,
				   ops->datbuf);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
1129
/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single
 *	     ECC chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc)
{
	memset(oobecc, 0, sizeof(*oobecc));

	if (!mtd || section < 0)
		return -EINVAL;

	if (!mtd->ooblayout || !mtd->ooblayout->ecc)
		return -ENOTSUPP;

	return mtd->ooblayout->ecc(mtd, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
1160
/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree)
{
	memset(oobfree, 0, sizeof(*oobfree));

	if (!mtd || section < 0)
		return -EINVAL;

	/* U-Boot uses ->rfree ("region free") for this hook */
	if (!mtd->ooblayout || !mtd->ooblayout->rfree)
		return -ENOTSUPP;

	return mtd->ooblayout->rfree(mtd, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1192
1193/**
1194 * mtd_ooblayout_find_region - Find the region attached to a specific byte
1195 * @mtd: mtd info structure
1196 * @byte: the byte we are searching for
1197 * @sectionp: pointer where the section id will be stored
1198 * @oobregion: used to retrieve the ECC position
1199 * @iter: iterator function. Should be either mtd_ooblayout_free or
1200 * mtd_ooblayout_ecc depending on the region type you're searching for
1201 *
1202 * This function returns the section id and oobregion information of a
1203 * specific byte. For example, say you want to know where the 4th ECC byte is
1204 * stored, you'll use:
1205 *
1206 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1207 *
1208 * Returns zero on success, a negative error code otherwise.
1209 */
1210static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1211 int *sectionp, struct mtd_oob_region *oobregion,
1212 int (*iter)(struct mtd_info *,
1213 int section,
1214 struct mtd_oob_region *oobregion))
1215{
1216 int pos = 0, ret, section = 0;
1217
1218 memset(oobregion, 0, sizeof(*oobregion));
1219
1220 while (1) {
1221 ret = iter(mtd, section, oobregion);
1222 if (ret)
1223 return ret;
1224
1225 if (pos + oobregion->length > byte)
1226 break;
1227
1228 pos += oobregion->length;
1229 section++;
1230 }
1231
1232 /*
1233 * Adjust region info to make it start at the beginning at the
1234 * 'start' ECC byte.
1235 */
1236 oobregion->offset += byte - pos;
1237 oobregion->length -= byte - pos;
1238 *sectionp = section;
1239
1240 return 0;
1241}
1242
/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
 * byte.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion)
{
	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
					 mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1264
1265/**
1266 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1267 * @mtd: mtd info structure
1268 * @buf: destination buffer to store OOB bytes
1269 * @oobbuf: OOB buffer
1270 * @start: first byte to retrieve
1271 * @nbytes: number of bytes to retrieve
1272 * @iter: section iterator
1273 *
1274 * Extract bytes attached to a specific category (ECC or free)
1275 * from the OOB buffer and copy them into buf.
1276 *
1277 * Returns zero on success, a negative error code otherwise.
1278 */
1279static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1280 const u8 *oobbuf, int start, int nbytes,
1281 int (*iter)(struct mtd_info *,
1282 int section,
1283 struct mtd_oob_region *oobregion))
1284{
1285 struct mtd_oob_region oobregion;
1286 int section, ret;
1287
1288 ret = mtd_ooblayout_find_region(mtd, start, &section,
1289 &oobregion, iter);
1290
1291 while (!ret) {
1292 int cnt;
1293
1294 cnt = min_t(int, nbytes, oobregion.length);
1295 memcpy(buf, oobbuf + oobregion.offset, cnt);
1296 buf += cnt;
1297 nbytes -= cnt;
1298
1299 if (!nbytes)
1300 break;
1301
1302 ret = iter(mtd, ++section, &oobregion);
1303 }
1304
1305 return ret;
1306}
1307
1308/**
1309 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1310 * @mtd: mtd info structure
1311 * @buf: source buffer to get OOB bytes from
1312 * @oobbuf: OOB buffer
1313 * @start: first OOB byte to set
1314 * @nbytes: number of OOB bytes to set
1315 * @iter: section iterator
1316 *
1317 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1318 * is selected by passing the appropriate iterator.
1319 *
1320 * Returns zero on success, a negative error code otherwise.
1321 */
1322static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1323 u8 *oobbuf, int start, int nbytes,
1324 int (*iter)(struct mtd_info *,
1325 int section,
1326 struct mtd_oob_region *oobregion))
1327{
1328 struct mtd_oob_region oobregion;
1329 int section, ret;
1330
1331 ret = mtd_ooblayout_find_region(mtd, start, &section,
1332 &oobregion, iter);
1333
1334 while (!ret) {
1335 int cnt;
1336
1337 cnt = min_t(int, nbytes, oobregion.length);
1338 memcpy(oobbuf + oobregion.offset, buf, cnt);
1339 buf += cnt;
1340 nbytes -= cnt;
1341
1342 if (!nbytes)
1343 break;
1344
1345 ret = iter(mtd, ++section, &oobregion);
1346 }
1347
1348 return ret;
1349}
1350
1351/**
1352 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
1353 * @mtd: mtd info structure
1354 * @iter: category iterator
1355 *
1356 * Count the number of bytes in a given category.
1357 *
1358 * Returns a positive value on success, a negative error code otherwise.
1359 */
1360static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1361 int (*iter)(struct mtd_info *,
1362 int section,
1363 struct mtd_oob_region *oobregion))
1364{
1365 struct mtd_oob_region oobregion;
1366 int section = 0, ret, nbytes = 0;
1367
1368 while (1) {
1369 ret = iter(mtd, section++, &oobregion);
1370 if (ret) {
1371 if (ret == -ERANGE)
1372 ret = nbytes;
1373 break;
1374 }
1375
1376 nbytes += oobregion.length;
1377 }
1378
1379 return ret;
1380}
1381
/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1401
/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
				       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1421
/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first data byte to retrieve
 * @nbytes: number of data bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1441
/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first data byte to set
 * @nbytes: number of data bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes)
{
	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
				       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1461
/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns the byte count on success, a negative error code otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1475
/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns the byte count on success, a negative error code otherwise.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
1489
/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is read
 * only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001505
/* Read from the factory (read-only) OTP area; -EOPNOTSUPP if unsupported. */
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001517
/* Query layout of the user OTP area; -EOPNOTSUPP if unsupported. */
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001528
/* Read from the user OTP area; -EOPNOTSUPP if unsupported. */
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001540
/*
 * Write to the one-time-programmable user area. Returns 0 on success,
 * -EOPNOTSUPP if the device has no user OTP support, -ENOSPC when the
 * driver reported success but wrote nothing (area exhausted).
 */
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of memory and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001562
/* Permanently lock a range of the user OTP area; -EOPNOTSUPP if unsupported. */
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001572
/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001585
/* Unlock a range previously locked with mtd_lock(); range-checked wrapper. */
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);
1597
/* Query the lock status of a range; -EOPNOTSUPP if the driver has no hook. */
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001609
/*
 * Check whether the eraseblock at @ofs is reserved (e.g. by a bad block
 * table). Devices without the hook report 0 (not reserved).
 */
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);
1619
/*
 * Check whether the eraseblock at @ofs is marked bad. Devices without the
 * hook (e.g. NOR) report 0 (good).
 */
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
Sergey Lapin3a38a552013-01-14 03:46:50 +00001629
/* Mark the eraseblock at @ofs bad; requires a writeable device and a hook. */
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
1641
1642#ifndef __UBOOT__
1643/*
1644 * default_mtd_writev - the default writev method
1645 * @mtd: mtd device description object pointer
1646 * @vecs: the vectors to write
1647 * @count: count of vectors in @vecs
1648 * @to: the MTD device offset to write to
1649 * @retlen: on exit contains the count of bytes written to the MTD device.
1650 *
1651 * This function returns zero in case of success and a negative error code in
1652 * case of failure.
1653 */
1654static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
1655 unsigned long count, loff_t to, size_t *retlen)
1656{
1657 unsigned long i;
1658 size_t totlen = 0, thislen;
1659 int ret = 0;
1660
1661 for (i = 0; i < count; i++) {
1662 if (!vecs[i].iov_len)
1663 continue;
1664 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
1665 vecs[i].iov_base);
1666 totlen += thislen;
1667 if (ret || thislen != vecs[i].iov_len)
1668 break;
1669 to += vecs[i].iov_len;
1670 }
1671 *retlen = totlen;
1672 return ret;
1673}
1674
/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	/* Drivers without a native writev get the sequential fallback */
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
1697
/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	/* Polite flags: no warnings, no retries, no kswapd wakeups */
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		      __GFP_NORETRY | __GFP_NO_KSWAPD;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		/* Back off exponentially, keeping writesize alignment */
		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
1747#endif
1748
1749#ifdef CONFIG_PROC_FS
1750
1751/*====================================================================*/
1752/* Support for /proc/mtd */
1753
/* seq_file show callback: dump the device table for /proc/mtd. */
static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	/* Hold the table lock so devices cannot vanish while printing */
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
1768
/* open() handler for /proc/mtd: bind the seq_file show callback. */
static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}
1773
/* File operations for /proc/mtd (standard single-record seq_file wiring). */
static const struct file_operations mtd_proc_ops = {
	.open		= mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1780#endif /* CONFIG_PROC_FS */
1781
1782/*====================================================================*/
1783/* Init code */
1784
1785#ifndef __UBOOT__
/*
 * Initialize and register one backing_dev_info under @name; tears the BDI
 * back down if registration fails. Returns 0 or a negative error code.
 */
static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}
1799
1800static struct proc_dir_entry *proc_mtd;
1801
1802static int __init init_mtd(void)
1803{
1804 int ret;
1805
1806 ret = class_register(&mtd_class);
1807 if (ret)
1808 goto err_reg;
1809
1810 ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
1811 if (ret)
1812 goto err_bdi1;
1813
1814 ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
1815 if (ret)
1816 goto err_bdi2;
1817
1818 ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
1819 if (ret)
1820 goto err_bdi3;
1821
1822 proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
1823
1824 ret = init_mtdchar();
1825 if (ret)
1826 goto out_procfs;
1827
1828 return 0;
1829
1830out_procfs:
1831 if (proc_mtd)
1832 remove_proc_entry("mtd", NULL);
1833err_bdi3:
1834 bdi_destroy(&mtd_bdi_ro_mappable);
1835err_bdi2:
1836 bdi_destroy(&mtd_bdi_unmappable);
1837err_bdi1:
1838 class_unregister(&mtd_class);
1839err_reg:
1840 pr_err("Error registering mtd class or bdi: %d\n", ret);
1841 return ret;
1842}
1843
/* Module exit: tear down chardev, /proc entry, class and all three BDIs. */
static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi_unmappable);
	bdi_destroy(&mtd_bdi_ro_mappable);
	bdi_destroy(&mtd_bdi_rw_mappable);
}
1854
1855module_init(init_mtd);
1856module_exit(cleanup_mtd);
1857#endif
1858
1859MODULE_LICENSE("GPL");
1860MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
1861MODULE_DESCRIPTION("Core MTD registration and access routines");