// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If no MTD devices
 * were specified, UBI does not attach any MTD device, but it is possible to
 * do that later using the "UBI control device".
 */

#ifndef __UBOOT__
#include <log.h>
#include <dm/devres.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/major.h>
#else
#include <linux/bug.h>
#include <linux/log2.h>
#endif
#include <linux/err.h>
#include <ubi_uboot.h>
#include <linux/mtd/partitions.h>

#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 4
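
/*
 * Illustration (assumed format, not taken from this file): the up-to-four
 * comma-separated items of the 'mtd=' parameter correspond to the fields of
 * struct mtd_dev_param below, i.e. roughly
 *
 *	mtd=<name|num|path>[,<vid_hdr_offs>[,<max_beb_per1024>[,<ubi_num>]]]
 *
 * for example "mtd=nand0,0,20". The parameter parsing code (not part of this
 * excerpt) is authoritative for the exact syntax.
 */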

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

#if (CONFIG_SYS_MALLOC_LEN < (512 << 10))
#error Malloc area too small for UBI, increase CONFIG_SYS_MALLOC_LEN to >= 512k
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @ubi_num: UBI device number to assign to the attached MTD device
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int ubi_num;
	int vid_hdr_offs;
	int max_beb_per1024;
};

/* Number of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifndef __UBOOT__
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
static bool fm_debug;
#endif
#else
#ifdef CONFIG_MTD_UBI_FASTMAP
static bool fm_autoconvert = CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT;
static bool fm_debug = CONFIG_MTD_UBI_FM_DEBUG;
#endif
#endif

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

#ifndef __UBOOT__
/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};
#endif

/* All UBI devices in system */
#ifndef __UBOOT__
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
#else
struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
#endif

#ifndef __UBOOT__
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_class_attrs[] = {
	__ATTR(version, S_IRUGO, ubi_version_show, NULL),
	__ATTR_NULL
};

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class ubi_class = {
	.name = UBI_NAME_STR,
	.owner = THIS_MODULE,
	.class_attrs = ubi_class_attrs,
};

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
#endif

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	int ret;
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);

	switch (ntype) {
	case UBI_VOLUME_ADDED:
	case UBI_VOLUME_REMOVED:
	case UBI_VOLUME_RESIZED:
	case UBI_VOLUME_RENAMED:
		ret = ubi_update_fastmap(ubi);
		if (ret)
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
	}

	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;
#ifndef __UBOOT__
	int ret;
#endif

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since @ubi->device_mutex is held, and we are not going to
		 * change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
#ifndef __UBOOT__
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			ret = blocking_notifier_call_chain(&ubi_notifiers, ntype,
							   &nt);
#endif
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns UBI device description object for UBI device number
 * @ubi_num, or %NULL if the device does not exist. This function increases the
 * device reference count to prevent removal of the device. In other words, the
 * device cannot be removed if its reference count is not zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}
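
/*
 * Usage note (illustrative): every successful ubi_get_device() call must be
 * balanced by ubi_put_device() once the caller is done with the device:
 *
 *	struct ubi_device *ubi = ubi_get_device(0);
 *
 *	if (ubi) {
 *		... use ubi ...
 *		ubi_put_device(ubi);
 *	}
 */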

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches UBI device number object by its major number. If UBI
 * device was not found, this function returns -ENODEV, otherwise the UBI device
 * number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

#ifndef __UBOOT__
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return %NULL and we fail with -ENODEV.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static struct attribute *ubi_dev_attrs[] = {
	&dev_eraseblock_size.attr,
	&dev_avail_eraseblocks.attr,
	&dev_total_eraseblocks.attr,
	&dev_volumes_count.attr,
	&dev_max_ec.attr,
	&dev_reserved_for_bad.attr,
	&dev_bad_peb_count.attr,
	&dev_max_vol_count.attr,
	&dev_min_io_size.attr,
	&dev_bgt_enabled.attr,
	&dev_mtd_num.attr,
	NULL
};
ATTRIBUTE_GROUPS(ubi_dev);

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * ubi_sysfs_init - initialize sysfs for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = &ubi_class;
	ubi->dev.groups = ubi_dev_groups;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	return 0;
}

/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_unregister(&ubi->dev);
}
#endif

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
#ifndef __UBOOT__
	dev_t dev;
#endif

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err(ubi, "cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err(ubi, "cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err(ubi, "cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err(ubi, "cannot initialize UBI %s, error %d",
		ubi->ubi_name, err);
	return err;
}

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 * the memory allocated for the volumes is freed as well (in the release
 * function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * ubi_free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]->eba_tbl);
		kfree(ubi->volumes[i]);
	}
}

static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024)
		return 0;

	/*
	 * Here we are using size of the entire flash chip and
	 * not just the MTD partition size because the maximum
	 * number of bad eraseblocks is a percentage of the
	 * whole device and bad eraseblocks are not fairly
	 * distributed over the flash chip. So the worst case
	 * is that all the bad eraseblocks of the chip are in
	 * the MTD partition we are attaching (ubi->mtd).
	 */
	device_size = mtd_get_device_size(ubi->mtd);
	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}
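
/*
 * Worked example (illustrative numbers): for a 256 MiB chip with 128 KiB
 * eraseblocks, device_pebs = 2048. With max_beb_per1024 = 20 the limit is
 * 2048 * 20 / 1024 = 40 PEBs reserved for bad block handling; the check
 * above rounds the result up whenever the division leaves a remainder.
 */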

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err(ubi, "multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * Maximum write size has to be greater or equivalent to min. I/O
	 * size, and be multiple of min. I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size %d", ubi->min_io_size);
	dbg_gen("max_write_size %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
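	/*
	 * Worked example (illustrative): for a NAND with 128 KiB PEBs, 2 KiB
	 * pages and no sub-pages (hdrs_min_io_size == min_io_size == 2048)
	 * and vid_hdr_offset == 0: ec_hdr_alsize = ALIGN(64, 2048) = 2048, so
	 * vid_hdr_offset = vid_hdr_aloffset = 2048, vid_hdr_shift = 0 and
	 * leb_start = ALIGN(2048 + 64, 2048) = 4096, leaving a usable LEB
	 * size of 128 KiB - 4 KiB = 124 KiB (computed below).
	 */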

	dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err(ubi, "unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized so far.
	 */

	return 0;
}

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	if (ubi->ro_mode) {
		ubi_warn(ubi, "skip auto-resize because of R/O mode");
		return 0;
	}

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		vtbl_rec = ubi->vtbl[vol_id];
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err(ubi, "cannot auto-resize volume %d",
				vol_id);
	}

	if (err)
		return err;

	ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
		vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
 *
 * This function attaches MTD device @mtd to UBI and assigns the @ubi_num number
 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
 * which case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			ubi_err(ubi, "mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems like the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in inability to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
			mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			ubi_err(ubi, "only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			ubi_err(ubi, "already exists");
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
		UBI_FM_MIN_POOL_SIZE);

	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
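	/*
	 * Worked example (illustrative): with 2048 usable PEBs, 5% is
	 * (2048 / 100) * 5 = 100 in the integer arithmetic above; if that
	 * value falls within the [UBI_FM_MIN_POOL_SIZE, UBI_FM_MAX_POOL_SIZE]
	 * clamp, fm_pool.max_size becomes 100 and fm_wl_pool.max_size 50.
	 */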
Heiko Schocherf5895d12014-06-24 10:10:04 +0200960 ubi->fm_disabled = !fm_autoconvert;
Heiko Schocher94b66de2015-10-22 06:19:21 +0200961 if (fm_debug)
962 ubi_enable_dbg_chk_fastmap(ubi);
Heiko Schocherf5895d12014-06-24 10:10:04 +0200963
964 if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
965 <= UBI_FM_MAX_START) {
Heiko Schocher94b66de2015-10-22 06:19:21 +0200966 ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
Heiko Schocherf5895d12014-06-24 10:10:04 +0200967 UBI_FM_MAX_START);
968 ubi->fm_disabled = 1;
969 }
970
Heiko Schocher94b66de2015-10-22 06:19:21 +0200971 ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
972 ubi_msg(ubi, "default fastmap WL pool size: %d",
973 ubi->fm_wl_pool.max_size);
Heiko Schocherf5895d12014-06-24 10:10:04 +0200974#else
975 ubi->fm_disabled = 1;
976#endif
Kyungmin Park1d8dca62008-11-19 16:23:06 +0100977 mutex_init(&ubi->buf_mutex);
978 mutex_init(&ubi->ckvol_mutex);
Heiko Schocherf5895d12014-06-24 10:10:04 +0200979 mutex_init(&ubi->device_mutex);
Kyungmin Park1d8dca62008-11-19 16:23:06 +0100980 spin_lock_init(&ubi->volumes_lock);
Heiko Schocher94b66de2015-10-22 06:19:21 +0200981 init_rwsem(&ubi->fm_protect);
982 init_rwsem(&ubi->fm_eba_sem);
Kyungmin Park1d8dca62008-11-19 16:23:06 +0100983
Heiko Schocher94b66de2015-10-22 06:19:21 +0200984 ubi_msg(ubi, "attaching mtd%d", mtd->index);
Kyungmin Park1d8dca62008-11-19 16:23:06 +0100985
Heiko Schocherf5895d12014-06-24 10:10:04 +0200986 err = io_init(ubi, max_beb_per1024);
Kyungmin Park1d8dca62008-11-19 16:23:06 +0100987 if (err)
988 goto out_free;
989
Stefan Roese920863d2008-12-10 10:28:33 +0100990 err = -ENOMEM;
Heiko Schocherf5895d12014-06-24 10:10:04 +0200991 ubi->peb_buf = vmalloc(ubi->peb_size);
992 if (!ubi->peb_buf)
Stefan Roese920863d2008-12-10 10:28:33 +0100993 goto out_free;
Kyungmin Park1d8dca62008-11-19 16:23:06 +0100994
Heiko Schocherf5895d12014-06-24 10:10:04 +0200995#ifdef CONFIG_MTD_UBI_FASTMAP
996 ubi->fm_size = ubi_calc_fm_size(ubi);
997 ubi->fm_buf = vzalloc(ubi->fm_size);
998 if (!ubi->fm_buf)
Stefan Roese920863d2008-12-10 10:28:33 +0100999 goto out_free;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001000#endif
Heiko Schocherf5895d12014-06-24 10:10:04 +02001001 err = ubi_attach(ubi, 0);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001002 if (err) {
Heiko Schocher94b66de2015-10-22 06:19:21 +02001003 ubi_err(ubi, "failed to attach mtd%d, error %d",
1004 mtd->index, err);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001005 goto out_free;
1006 }
1007
1008 if (ubi->autoresize_vol_id != -1) {
1009 err = autoresize(ubi, ubi->autoresize_vol_id);
1010 if (err)
1011 goto out_detach;
1012 }
1013
Heiko Schocherf5895d12014-06-24 10:10:04 +02001014 err = uif_init(ubi, &ref);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001015 if (err)
1016 goto out_detach;
1017
Heiko Schocherf5895d12014-06-24 10:10:04 +02001018 err = ubi_debugfs_init_dev(ubi);
1019 if (err)
1020 goto out_uif;
1021
1022 ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001023 if (IS_ERR(ubi->bgt_thread)) {
1024 err = PTR_ERR(ubi->bgt_thread);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001025 ubi_err(ubi, "cannot spawn \"%s\", error %d",
1026 ubi->bgt_name, err);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001027 goto out_debugfs;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001028 }
1029
Heiko Schocher94b66de2015-10-22 06:19:21 +02001030 ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
1031 mtd->index, mtd->name, ubi->flash_size >> 20);
1032 ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
Heiko Schocherf5895d12014-06-24 10:10:04 +02001033 ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001034 ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
Heiko Schocherf5895d12014-06-24 10:10:04 +02001035 ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001036 ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
Heiko Schocherf5895d12014-06-24 10:10:04 +02001037 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001038 ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
Heiko Schocherf5895d12014-06-24 10:10:04 +02001039 ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001040 ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
Heiko Schocherf5895d12014-06-24 10:10:04 +02001041 ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
1042 ubi->vtbl_slots);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001043 ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
Heiko Schocherf5895d12014-06-24 10:10:04 +02001044 ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
1045 ubi->image_seq);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001046 ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
Heiko Schocherf5895d12014-06-24 10:10:04 +02001047 ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001048
Heiko Schocherf5895d12014-06-24 10:10:04 +02001049 /*
1050 * The below lock makes sure we do not race with 'ubi_thread()' which
1051 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
1052 */
1053 spin_lock(&ubi->wl_lock);
1054 ubi->thread_enabled = 1;
Heiko Schocher94b66de2015-10-22 06:19:21 +02001055#ifndef __UBOOT__
Heiko Schocherf5895d12014-06-24 10:10:04 +02001056 wake_up_process(ubi->bgt_thread);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001057#else
Richard Weinbergerfc168682018-02-08 07:29:52 +01001058 ubi_do_worker(ubi);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001059#endif
1060
Heiko Schocherf5895d12014-06-24 10:10:04 +02001061 spin_unlock(&ubi->wl_lock);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001062
1063 ubi_devices[ubi_num] = ubi;
Heiko Schocherf5895d12014-06-24 10:10:04 +02001064 ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001065 return ubi_num;
1066
Heiko Schocherf5895d12014-06-24 10:10:04 +02001067out_debugfs:
1068 ubi_debugfs_exit_dev(ubi);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001069out_uif:
Heiko Schocherf5895d12014-06-24 10:10:04 +02001070 get_device(&ubi->dev);
1071 ubi_assert(ref);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001072 uif_close(ubi);
1073out_detach:
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001074 ubi_wl_close(ubi);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001075 ubi_free_internal_volumes(ubi);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001076 vfree(ubi->vtbl);
1077out_free:
Heiko Schocherf5895d12014-06-24 10:10:04 +02001078 vfree(ubi->peb_buf);
1079 vfree(ubi->fm_buf);
1080 if (ref)
1081 put_device(&ubi->dev);
1082 else
1083 kfree(ubi);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001084 return err;
1085}
1086
1087/**
1088 * ubi_detach_mtd_dev - detach an MTD device.
1089 * @ubi_num: UBI device number to detach from
1090 * @anyway: detach MTD even if device reference count is not zero
1091 *
1092 * This function destroys an UBI device number @ubi_num and detaches the
1093 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
1094 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
1095 * exist.
1096 *
1097 * Note, the invocations of this function has to be serialized by the
1098 * @ubi_devices_mutex.
1099 */
1100int ubi_detach_mtd_dev(int ubi_num, int anyway)
1101{
1102 struct ubi_device *ubi;
1103
1104 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
1105 return -EINVAL;
1106
Heiko Schocherf5895d12014-06-24 10:10:04 +02001107 ubi = ubi_get_device(ubi_num);
1108 if (!ubi)
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001109 return -EINVAL;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001110
Heiko Schocherf5895d12014-06-24 10:10:04 +02001111 spin_lock(&ubi_devices_lock);
1112 put_device(&ubi->dev);
1113 ubi->ref_count -= 1;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001114 if (ubi->ref_count) {
1115 if (!anyway) {
1116 spin_unlock(&ubi_devices_lock);
1117 return -EBUSY;
1118 }
1119 /* This may only happen if there is a bug */
Heiko Schocher94b66de2015-10-22 06:19:21 +02001120 ubi_err(ubi, "%s reference count %d, destroy anyway",
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001121 ubi->ubi_name, ubi->ref_count);
1122 }
1123 ubi_devices[ubi_num] = NULL;
1124 spin_unlock(&ubi_devices_lock);
1125
1126 ubi_assert(ubi_num == ubi->ubi_num);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001127 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001128 ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001129#ifdef CONFIG_MTD_UBI_FASTMAP
1130 /* If we don't write a new fastmap at detach time we lose all
Heiko Schocher94b66de2015-10-22 06:19:21 +02001131 * EC updates that have been made since the last written fastmap.
1132 * In case of fastmap debugging we omit the update to simulate an
1133 * unclean shutdown. */
1134 if (!ubi_dbg_chk_fastmap(ubi))
1135 ubi_update_fastmap(ubi);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001136#endif
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001137 /*
1138 * Before freeing anything, we have to stop the background thread to
1139 * prevent it from doing anything on this device while we are freeing.
1140 */
1141 if (ubi->bgt_thread)
1142 kthread_stop(ubi->bgt_thread);
1143
Heiko Schocherf5895d12014-06-24 10:10:04 +02001144 /*
1145 * Get a reference to the device in order to prevent 'dev_release()'
1146 * from freeing the @ubi object.
1147 */
1148 get_device(&ubi->dev);
1149
1150 ubi_debugfs_exit_dev(ubi);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001151 uif_close(ubi);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001152
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001153 ubi_wl_close(ubi);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001154 ubi_free_internal_volumes(ubi);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001155 vfree(ubi->vtbl);
1156 put_mtd_device(ubi->mtd);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001157 vfree(ubi->peb_buf);
1158 vfree(ubi->fm_buf);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001159 ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001160 put_device(&ubi->dev);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001161 return 0;
1162}
Heiko Schocherf5895d12014-06-24 10:10:04 +02001163
1164#ifndef __UBOOT__
1165/**
1166 * open_mtd_by_chdev - open an MTD device by its character device node path.
1167 * @mtd_dev: MTD character device node path
1168 *
1169 * This helper function opens an MTD device by its character node device path.
1170 * Returns MTD device description object in case of success and a negative
1171 * error code in case of failure.
1172 */
1173static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
1174{
1175 int err, major, minor, mode;
1176 struct path path;
1177
1178 /* Probably this is an MTD character device node path */
1179 err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
1180 if (err)
1181 return ERR_PTR(err);
1182
1183 /* MTD device number is defined by the major / minor numbers */
Heiko Schocher94b66de2015-10-22 06:19:21 +02001184 major = imajor(d_backing_inode(path.dentry));
1185 minor = iminor(d_backing_inode(path.dentry));
1186 mode = d_backing_inode(path.dentry)->i_mode;
Heiko Schocherf5895d12014-06-24 10:10:04 +02001187 path_put(&path);
1188 if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
1189 return ERR_PTR(-EINVAL);
1190
1191 if (minor & 1)
1192 /*
1193 * Just do not think the "/dev/mtdrX" devices support is need,
1194 * so do not support them to avoid doing extra work.
1195 */
1196 return ERR_PTR(-EINVAL);
1197
1198 return get_mtd_device(NULL, minor / 2);
1199}
1200#endif
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001201
1202/**
Heiko Schocherf5895d12014-06-24 10:10:04 +02001203 * open_mtd_device - open MTD device by name, character device path, or number.
1204 * @mtd_dev: name, character device node path, or MTD device device number
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001205 *
1206 * This function tries to open and MTD device described by @mtd_dev string,
Heiko Schocherf5895d12014-06-24 10:10:04 +02001207 * which is first treated as ASCII MTD device number, and if it is not true, it
1208 * is treated as MTD device name, and if that is also not true, it is treated
1209 * as MTD character device node path. Returns MTD device description object in
1210 * case of success and a negative error code in case of failure.
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001211 */
1212static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
1213{
1214 struct mtd_info *mtd;
1215 int mtd_num;
1216 char *endp;
1217
1218 mtd_num = simple_strtoul(mtd_dev, &endp, 0);
1219 if (*endp != '\0' || mtd_dev == endp) {
1220 /*
1221 * This does not look like an ASCII integer, probably this is
1222 * MTD device name.
1223 */
1224 mtd = get_mtd_device_nm(mtd_dev);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001225#ifndef __UBOOT__
1226 if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
1227 /* Probably this is an MTD character device node path */
1228 mtd = open_mtd_by_chdev(mtd_dev);
1229#endif
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001230 } else
1231 mtd = get_mtd_device(NULL, mtd_num);
1232
1233 return mtd;
1234}
1235
Heiko Schocherf5895d12014-06-24 10:10:04 +02001236#ifndef __UBOOT__
1237static int __init ubi_init(void)
1238#else
1239int ubi_init(void)
1240#endif
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001241{
1242 int err, i, k;
1243
1244 /* Ensure that EC and VID headers have correct size */
1245 BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
1246 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
1247
1248 if (mtd_devs > UBI_MAX_DEVICES) {
Stefan Roesed6d09012018-05-29 15:28:54 +02001249 pr_err("UBI error: too many MTD devices, maximum is %d\n",
Heiko Schocher94b66de2015-10-22 06:19:21 +02001250 UBI_MAX_DEVICES);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001251 return -EINVAL;
1252 }
1253
1254 /* Create base sysfs directory and sysfs files */
Heiko Schocher94b66de2015-10-22 06:19:21 +02001255 err = class_register(&ubi_class);
1256 if (err < 0)
1257 return err;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001258
1259 err = misc_register(&ubi_ctrl_cdev);
1260 if (err) {
Stefan Roesed6d09012018-05-29 15:28:54 +02001261 pr_err("UBI error: cannot register device\n");
Heiko Schocher94b66de2015-10-22 06:19:21 +02001262 goto out;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001263 }
1264
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001265 ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
1266 sizeof(struct ubi_wl_entry),
1267 0, 0, NULL);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001268 if (!ubi_wl_entry_slab) {
1269 err = -ENOMEM;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001270 goto out_dev_unreg;
Heiko Schocherf5895d12014-06-24 10:10:04 +02001271 }
1272
1273 err = ubi_debugfs_init();
1274 if (err)
1275 goto out_slab;
1276
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001277
1278 /* Attach MTD devices */
1279 for (i = 0; i < mtd_devs; i++) {
1280 struct mtd_dev_param *p = &mtd_dev_param[i];
1281 struct mtd_info *mtd;
1282
1283 cond_resched();
1284
1285 mtd = open_mtd_device(p->name);
1286 if (IS_ERR(mtd)) {
1287 err = PTR_ERR(mtd);
Stefan Roesed6d09012018-05-29 15:28:54 +02001288 pr_err("UBI error: cannot open mtd %s, error %d\n",
Heiko Schocher94b66de2015-10-22 06:19:21 +02001289 p->name, err);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001290 /* See comment below re-ubi_is_module(). */
1291 if (ubi_is_module())
1292 goto out_detach;
1293 continue;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001294 }
1295
1296 mutex_lock(&ubi_devices_mutex);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001297 err = ubi_attach_mtd_dev(mtd, p->ubi_num,
1298 p->vid_hdr_offs, p->max_beb_per1024);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001299 mutex_unlock(&ubi_devices_mutex);
1300 if (err < 0) {
Stefan Roesed6d09012018-05-29 15:28:54 +02001301 pr_err("UBI error: cannot attach mtd%d\n",
Heiko Schocher94b66de2015-10-22 06:19:21 +02001302 mtd->index);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001303 put_mtd_device(mtd);
1304
1305 /*
1306 * Originally UBI stopped initializing on any error.
1307			 * However, it was later found that this behavior is
1308			 * undesirable when UBI is compiled into the kernel and
1309			 * the MTD devices to attach are passed through the
1310			 * command line: a single UBI failure then stopped the
1311			 * whole boot sequence.
1312 *
1313 * To fix this, we changed the behavior for the
1314 * non-module case, but preserved the old behavior for
1315 * the module case, just for compatibility. This is a
1316 * little inconsistent, though.
1317 */
1318 if (ubi_is_module())
1319 goto out_detach;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001320 }
1321 }
1322
Heiko Schocher081fe9e2014-07-15 16:08:43 +02001323 err = ubiblock_init();
1324 if (err) {
Stefan Roesed6d09012018-05-29 15:28:54 +02001325 pr_err("UBI error: block: cannot initialize, error %d\n", err);
Heiko Schocher081fe9e2014-07-15 16:08:43 +02001326
1327		/* See the comment above regarding ubi_is_module(). */
1328 if (ubi_is_module())
1329 goto out_detach;
1330 }
1331
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001332 return 0;
1333
1334out_detach:
1335 for (k = 0; k < i; k++)
1336 if (ubi_devices[k]) {
1337 mutex_lock(&ubi_devices_mutex);
1338 ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
1339 mutex_unlock(&ubi_devices_mutex);
1340 }
Heiko Schocherf5895d12014-06-24 10:10:04 +02001341 ubi_debugfs_exit();
1342out_slab:
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001343 kmem_cache_destroy(ubi_wl_entry_slab);
1344out_dev_unreg:
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001345 misc_deregister(&ubi_ctrl_cdev);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001346out:
Heiko Schocher131a8242015-01-20 09:05:23 +01001347#ifdef __UBOOT__
1348 /* Reset any globals that the driver depends on being zeroed */
1349 mtd_devs = 0;
1350#endif
Heiko Schocher94b66de2015-10-22 06:19:21 +02001351 class_unregister(&ubi_class);
Stefan Roesed6d09012018-05-29 15:28:54 +02001352 pr_err("UBI error: cannot initialize UBI, error %d\n", err);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001353 return err;
1354}
Heiko Schocherf5895d12014-06-24 10:10:04 +02001355late_initcall(ubi_init);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001356
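/*
 * ubi_exit - tear down the UBI core: remove ubiblock support, detach all
 * attached MTD devices and release the resources acquired in ubi_init().
 */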
Heiko Schocherf5895d12014-06-24 10:10:04 +02001357#ifndef __UBOOT__
1358static void __exit ubi_exit(void)
1359#else
1360void ubi_exit(void)
1361#endif
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001362{
1363 int i;
1364
Heiko Schocher081fe9e2014-07-15 16:08:43 +02001365 ubiblock_exit();
1366
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001367 for (i = 0; i < UBI_MAX_DEVICES; i++)
1368 if (ubi_devices[i]) {
1369 mutex_lock(&ubi_devices_mutex);
1370 ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
1371 mutex_unlock(&ubi_devices_mutex);
1372 }
Heiko Schocherf5895d12014-06-24 10:10:04 +02001373 ubi_debugfs_exit();
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001374 kmem_cache_destroy(ubi_wl_entry_slab);
1375 misc_deregister(&ubi_ctrl_cdev);
Heiko Schocher94b66de2015-10-22 06:19:21 +02001376 class_unregister(&ubi_class);
Heiko Schocher131a8242015-01-20 09:05:23 +01001377#ifdef __UBOOT__
1378 /* Reset any globals that the driver depends on being zeroed */
1379 mtd_devs = 0;
1380#endif
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001381}
1382module_exit(ubi_exit);
1383
1384/**
Heiko Schocherf5895d12014-06-24 10:10:04 +02001385 * bytes_str_to_int - convert a string expressing a number of bytes into an integer.
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001386 * @str: the string to convert
1387 *
1388 * This function returns the resulting positive integer in case of success and a
1389 * negative error code in case of failure.
1390 */
1391static int __init bytes_str_to_int(const char *str)
1392{
1393 char *endp;
1394 unsigned long result;
1395
1396 result = simple_strtoul(str, &endp, 0);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001397 if (str == endp || result >= INT_MAX) {
Heiko Schocher94b66de2015-10-22 06:19:21 +02001398 pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001399 return -EINVAL;
1400 }
1401
1402 switch (*endp) {
1403 case 'G':
1404		result *= 1024;	/* fall through */
1405 case 'M':
1406		result *= 1024;	/* fall through */
1407 case 'K':
1408 result *= 1024;
1409 if (endp[1] == 'i' && endp[2] == 'B')
1410 endp += 2;
1411 case '\0':
1412 break;
1413 default:
Heiko Schocher94b66de2015-10-22 06:19:21 +02001414 pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001415 return -EINVAL;
1416 }
1417
1418 return result;
1419}
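/*
 * Illustrative conversions (hypothetical inputs):
 *
 *   "2048" -> 2048      "4KiB" -> 4096
 *   "2M"   -> 2097152   "1GiB" -> 1073741824
 *
 * The switch cases above fall through intentionally, so a 'G' suffix
 * multiplies by 1024 three times, 'M' twice and 'K' once.
 */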
1420
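/*
 * Minimal U-Boot stand-in for the kernel's kstrtoint(): parse @s in the
 * given @base, store the value in @res and return 0 on success, or a
 * negative error code if the value does not fit into an int.
 */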
Heiko Schocherf5895d12014-06-24 10:10:04 +02001421int kstrtoint(const char *s, unsigned int base, int *res)
1422{
1423 unsigned long long tmp;
1424
1425 tmp = simple_strtoull(s, NULL, base);
1426 if (tmp != (unsigned long long)(int)tmp)
1427 return -ERANGE;
1428
1429	*res = (int)tmp;
	return 0;
1430}
1431
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001432/**
1433 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
1434 * @val: the parameter value to parse
1435 * @kp: not used
1436 *
1437 * This function returns zero in case of success and a negative error code in
1438 * case of error.
1439 */
Heiko Schocherf5895d12014-06-24 10:10:04 +02001440#ifndef __UBOOT__
1441static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1442#else
1443int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1444#endif
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001445{
1446 int i, len;
1447 struct mtd_dev_param *p;
1448 char buf[MTD_PARAM_LEN_MAX];
1449 char *pbuf = &buf[0];
Heiko Schocherf5895d12014-06-24 10:10:04 +02001450 char *tokens[MTD_PARAM_MAX_COUNT], *token;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001451
1452 if (!val)
1453 return -EINVAL;
1454
1455 if (mtd_devs == UBI_MAX_DEVICES) {
Heiko Schocher94b66de2015-10-22 06:19:21 +02001456 pr_err("UBI error: too many parameters, max. is %d\n",
1457 UBI_MAX_DEVICES);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001458 return -EINVAL;
1459 }
1460
1461 len = strnlen(val, MTD_PARAM_LEN_MAX);
1462 if (len == MTD_PARAM_LEN_MAX) {
Heiko Schocher94b66de2015-10-22 06:19:21 +02001463 pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
1464 val, MTD_PARAM_LEN_MAX);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001465 return -EINVAL;
1466 }
1467
1468 if (len == 0) {
Heiko Schocherf5895d12014-06-24 10:10:04 +02001469 pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001470 return 0;
1471 }
1472
1473 strcpy(buf, val);
1474
1475 /* Get rid of the final newline */
1476 if (buf[len - 1] == '\n')
1477 buf[len - 1] = '\0';
1478
Heiko Schocherf5895d12014-06-24 10:10:04 +02001479 for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001480 tokens[i] = strsep(&pbuf, ",");
1481
1482 if (pbuf) {
Heiko Schocher94b66de2015-10-22 06:19:21 +02001483 pr_err("UBI error: too many arguments at \"%s\"\n", val);
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001484 return -EINVAL;
1485 }
1486
1487 p = &mtd_dev_param[mtd_devs];
1488 strcpy(&p->name[0], tokens[0]);
1489
Heiko Schocherf5895d12014-06-24 10:10:04 +02001490 token = tokens[1];
1491 if (token) {
1492 p->vid_hdr_offs = bytes_str_to_int(token);
1493
1494 if (p->vid_hdr_offs < 0)
1495 return p->vid_hdr_offs;
1496 }
1497
1498 token = tokens[2];
1499 if (token) {
1500 int err = kstrtoint(token, 10, &p->max_beb_per1024);
1501
1502 if (err) {
Heiko Schocher94b66de2015-10-22 06:19:21 +02001503 pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
1504 token);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001505 return -EINVAL;
1506 }
1507 }
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001508
Heiko Schocherf5895d12014-06-24 10:10:04 +02001509 token = tokens[3];
1510 if (token) {
1511 int err = kstrtoint(token, 10, &p->ubi_num);
1512
1513 if (err) {
Heiko Schocher94b66de2015-10-22 06:19:21 +02001514 pr_err("UBI error: bad value for ubi_num parameter: %s",
1515 token);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001516 return -EINVAL;
1517 }
1518 } else
1519 p->ubi_num = UBI_DEV_NUM_AUTO;
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001520
1521 mtd_devs += 1;
1522 return 0;
1523}
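/*
 * Illustrative parse of a hypothetical "mtd=" value:
 *
 *   "nand0,2048,20,1"
 *
 * fills in p->name = "nand0", p->vid_hdr_offs = 2048,
 * p->max_beb_per1024 = 20 and p->ubi_num = 1. Omitted trailing fields
 * keep their defaults; in particular, ubi_num defaults to UBI_DEV_NUM_AUTO.
 */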
1524
1525module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001526MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001527 "Multiple \"mtd\" parameters may be specified.\n"
Heiko Schocherf5895d12014-06-24 10:10:04 +02001528 "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
1529		      "Optional \"vid_hdr_offs\" parameter specifies the UBI VID header position to be used by UBI (the default is used if 0).\n"
1530		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks. (default value ("
1531 __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
1532		      "Optional \"ubi_num\" parameter specifies the UBI device number to be assigned to the newly created UBI device (assigned automatically by default).\n"
1533 "\n"
1534 "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
1535 "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
1536		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using the default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling\n"
1537		      "\t(e.g. if the NAND *chipset* has 4096 PEBs, 100 will be reserved for this UBI device).\n"
1538		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI device 5, using default values for the other fields.");
1539#ifdef CONFIG_MTD_UBI_FASTMAP
1540module_param(fm_autoconvert, bool, 0644);
1541MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
Heiko Schocher94b66de2015-10-22 06:19:21 +02001542module_param(fm_debug, bool, 0);
1543MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
Heiko Schocherf5895d12014-06-24 10:10:04 +02001544#endif
Kyungmin Park1d8dca62008-11-19 16:23:06 +01001545MODULE_VERSION(__stringify(UBI_VERSION));
1546MODULE_DESCRIPTION("UBI - Unsorted Block Images");
1547MODULE_AUTHOR("Artem Bityutskiy");
1548MODULE_LICENSE("GPL");