blob: 41763a189790041cd311c1fd608a79c186b783e9 [file] [log] [blame]
Stefan Roese2fc10f62009-03-19 15:35:05 +01001/*
2 * This file is part of UBIFS.
3 *
4 * Copyright (C) 2006-2008 Nokia Corporation.
5 *
Heiko Schocherf5895d12014-06-24 10:10:04 +02006 * SPDX-License-Identifier: GPL-2.0+
Stefan Roese2fc10f62009-03-19 15:35:05 +01007 *
8 * Authors: Artem Bityutskiy (Битюцкий Артём)
9 * Adrian Hunter
10 */
11
12/*
13 * This file implements UBIFS initialization and VFS superblock operations. Some
14 * initialization stuff which is rather large and complex is placed at
15 * corresponding subsystems, but most of it is here.
16 */
17
Heiko Schocherf5895d12014-06-24 10:10:04 +020018#ifndef __UBOOT__
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/module.h>
22#include <linux/ctype.h>
23#include <linux/kthread.h>
24#include <linux/parser.h>
25#include <linux/seq_file.h>
26#include <linux/mount.h>
Stefan Roese2fc10f62009-03-19 15:35:05 +010027#include <linux/math64.h>
Heiko Schocherf5895d12014-06-24 10:10:04 +020028#include <linux/writeback.h>
29#else
Stefan Roese2fc10f62009-03-19 15:35:05 +010030
Simon Glassa87fc0a2015-09-02 17:24:57 -060031#include <common.h>
32#include <malloc.h>
33#include <memalign.h>
Heiko Schocherf5895d12014-06-24 10:10:04 +020034#include <linux/compat.h>
35#include <linux/stat.h>
36#include <linux/err.h>
37#include "ubifs.h"
38#include <ubi_uboot.h>
39#include <mtd/ubi-user.h>
Stefan Roese2fc10f62009-03-19 15:35:05 +010040
Heiko Schocherf5895d12014-06-24 10:10:04 +020041struct dentry;
42struct file;
43struct iattr;
44struct kstat;
45struct vfsmount;
Stefan Roese2fc10f62009-03-19 15:35:05 +010046
Heiko Schocherf5895d12014-06-24 10:10:04 +020047#define INODE_LOCKED_MAX 64
Stefan Roese2fc10f62009-03-19 15:35:05 +010048
Heiko Schocherf5895d12014-06-24 10:10:04 +020049struct super_block *ubifs_sb;
50LIST_HEAD(super_blocks);
Stefan Roese2fc10f62009-03-19 15:35:05 +010051
Heiko Schocherf5895d12014-06-24 10:10:04 +020052static struct inode *inodes_locked_down[INODE_LOCKED_MAX];
Stefan Roese2fc10f62009-03-19 15:35:05 +010053
Heiko Schocherf5895d12014-06-24 10:10:04 +020054int set_anon_super(struct super_block *s, void *data)
Stefan Roese2fc10f62009-03-19 15:35:05 +010055{
Stefan Roese2fc10f62009-03-19 15:35:05 +010056 return 0;
57}
58
Stefan Roese2fc10f62009-03-19 15:35:05 +010059struct inode *iget_locked(struct super_block *sb, unsigned long ino)
60{
61 struct inode *inode;
62
Marcel Ziswilerabc574b2015-08-18 13:06:37 +020063 inode = (struct inode *)malloc_cache_aligned(
64 sizeof(struct ubifs_inode));
Stefan Roese2fc10f62009-03-19 15:35:05 +010065 if (inode) {
66 inode->i_ino = ino;
67 inode->i_sb = sb;
68 list_add(&inode->i_sb_list, &sb->s_inodes);
69 inode->i_state = I_LOCK | I_NEW;
70 }
71
72 return inode;
73}
74
Heiko Schocherf5895d12014-06-24 10:10:04 +020075void iget_failed(struct inode *inode)
76{
77}
78
Stefan Roese2fc10f62009-03-19 15:35:05 +010079int ubifs_iput(struct inode *inode)
80{
81 list_del_init(&inode->i_sb_list);
82
83 free(inode);
84 return 0;
85}
86
87/*
88 * Lock (save) inode in inode array for readback after recovery
89 */
90void iput(struct inode *inode)
91{
92 int i;
93 struct inode *ino;
94
95 /*
96 * Search end of list
97 */
98 for (i = 0; i < INODE_LOCKED_MAX; i++) {
99 if (inodes_locked_down[i] == NULL)
100 break;
101 }
102
103 if (i >= INODE_LOCKED_MAX) {
104 ubifs_err("Error, can't lock (save) more inodes while recovery!!!");
105 return;
106 }
107
108 /*
109 * Allocate and use new inode
110 */
Marcel Ziswilerabc574b2015-08-18 13:06:37 +0200111 ino = (struct inode *)malloc_cache_aligned(sizeof(struct ubifs_inode));
Stefan Roese2fc10f62009-03-19 15:35:05 +0100112 memcpy(ino, inode, sizeof(struct ubifs_inode));
113
114 /*
115 * Finally save inode in array
116 */
117 inodes_locked_down[i] = ino;
118}
119
Heiko Schocherf5895d12014-06-24 10:10:04 +0200120/* from fs/inode.c */
121/**
122 * clear_nlink - directly zero an inode's link count
123 * @inode: inode
124 *
125 * This is a low-level filesystem helper to replace any
126 * direct filesystem manipulation of i_nlink. See
127 * drop_nlink() for why we care about i_nlink hitting zero.
128 */
129void clear_nlink(struct inode *inode)
130{
131 if (inode->i_nlink) {
132 inode->__i_nlink = 0;
133 atomic_long_inc(&inode->i_sb->s_remove_count);
134 }
135}
136EXPORT_SYMBOL(clear_nlink);
137
138/**
139 * set_nlink - directly set an inode's link count
140 * @inode: inode
141 * @nlink: new nlink (should be non-zero)
142 *
143 * This is a low-level filesystem helper to replace any
144 * direct filesystem manipulation of i_nlink.
145 */
146void set_nlink(struct inode *inode, unsigned int nlink)
147{
148 if (!nlink) {
149 clear_nlink(inode);
150 } else {
151 /* Yes, some filesystems do change nlink from zero to one */
152 if (inode->i_nlink == 0)
153 atomic_long_dec(&inode->i_sb->s_remove_count);
154
155 inode->__i_nlink = nlink;
156 }
157}
158EXPORT_SYMBOL(set_nlink);
159
160/* from include/linux/fs.h */
161static inline void i_uid_write(struct inode *inode, uid_t uid)
162{
163 inode->i_uid.val = uid;
164}
165
166static inline void i_gid_write(struct inode *inode, gid_t gid)
167{
168 inode->i_gid.val = gid;
169}
170
171void unlock_new_inode(struct inode *inode)
172{
173 return;
174}
175#endif
176
177/*
178 * Maximum amount of memory we may 'kmalloc()' without worrying that we are
179 * allocating too much.
180 */
181#define UBIFS_KMALLOC_OK (128*1024)
182
183/* Slab cache for UBIFS inodes */
184struct kmem_cache *ubifs_inode_slab;
185
186#ifndef __UBOOT__
187/* UBIFS TNC shrinker description */
188static struct shrinker ubifs_shrinker_info = {
189 .scan_objects = ubifs_shrink_scan,
190 .count_objects = ubifs_shrink_count,
191 .seeks = DEFAULT_SEEKS,
192};
193#endif
194
195/**
196 * validate_inode - validate inode.
197 * @c: UBIFS file-system description object
198 * @inode: the inode to validate
199 *
200 * This is a helper function for 'ubifs_iget()' which validates various fields
201 * of a newly built inode to make sure they contain sane values and prevent
202 * possible vulnerabilities. Returns zero if the inode is all right and
203 * a non-zero error code if not.
204 */
205static int validate_inode(struct ubifs_info *c, const struct inode *inode)
206{
207 int err;
208 const struct ubifs_inode *ui = ubifs_inode(inode);
209
210 if (inode->i_size > c->max_inode_sz) {
211 ubifs_err("inode is too large (%lld)",
212 (long long)inode->i_size);
213 return 1;
214 }
215
216 if (ui->compr_type < 0 || ui->compr_type >= UBIFS_COMPR_TYPES_CNT) {
217 ubifs_err("unknown compression type %d", ui->compr_type);
218 return 2;
219 }
220
221 if (ui->xattr_names + ui->xattr_cnt > XATTR_LIST_MAX)
222 return 3;
223
224 if (ui->data_len < 0 || ui->data_len > UBIFS_MAX_INO_DATA)
225 return 4;
226
227 if (ui->xattr && !S_ISREG(inode->i_mode))
228 return 5;
229
230 if (!ubifs_compr_present(ui->compr_type)) {
231 ubifs_warn("inode %lu uses '%s' compression, but it was not compiled in",
232 inode->i_ino, ubifs_compr_name(ui->compr_type));
233 }
234
235 err = dbg_check_dir(c, inode);
236 return err;
237}
238
Stefan Roese2fc10f62009-03-19 15:35:05 +0100239struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
240{
241 int err;
242 union ubifs_key key;
243 struct ubifs_ino_node *ino;
244 struct ubifs_info *c = sb->s_fs_info;
245 struct inode *inode;
246 struct ubifs_inode *ui;
Heiko Schocherf5895d12014-06-24 10:10:04 +0200247#ifdef __UBOOT__
Stefan Roese2fc10f62009-03-19 15:35:05 +0100248 int i;
Heiko Schocherf5895d12014-06-24 10:10:04 +0200249#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +0100250
251 dbg_gen("inode %lu", inum);
252
Heiko Schocherf5895d12014-06-24 10:10:04 +0200253#ifdef __UBOOT__
Stefan Roese2fc10f62009-03-19 15:35:05 +0100254 /*
255 * U-Boot special handling of locked down inodes via recovery
256 * e.g. ubifs_recover_size()
257 */
258 for (i = 0; i < INODE_LOCKED_MAX; i++) {
259 /*
260 * Exit on last entry (NULL), inode not found in list
261 */
262 if (inodes_locked_down[i] == NULL)
263 break;
264
265 if (inodes_locked_down[i]->i_ino == inum) {
266 /*
267 * We found the locked down inode in our array,
268 * so just return this pointer instead of creating
269 * a new one.
270 */
271 return inodes_locked_down[i];
272 }
273 }
Heiko Schocherf5895d12014-06-24 10:10:04 +0200274#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +0100275
276 inode = iget_locked(sb, inum);
277 if (!inode)
278 return ERR_PTR(-ENOMEM);
279 if (!(inode->i_state & I_NEW))
280 return inode;
281 ui = ubifs_inode(inode);
282
283 ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
284 if (!ino) {
285 err = -ENOMEM;
286 goto out;
287 }
288
289 ino_key_init(c, &key, inode->i_ino);
290
291 err = ubifs_tnc_lookup(c, &key, ino);
292 if (err)
293 goto out_ino;
294
295 inode->i_flags |= (S_NOCMTIME | S_NOATIME);
Heiko Schocherf5895d12014-06-24 10:10:04 +0200296 set_nlink(inode, le32_to_cpu(ino->nlink));
297 i_uid_write(inode, le32_to_cpu(ino->uid));
298 i_gid_write(inode, le32_to_cpu(ino->gid));
Stefan Roese2fc10f62009-03-19 15:35:05 +0100299 inode->i_atime.tv_sec = (int64_t)le64_to_cpu(ino->atime_sec);
300 inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec);
301 inode->i_mtime.tv_sec = (int64_t)le64_to_cpu(ino->mtime_sec);
302 inode->i_mtime.tv_nsec = le32_to_cpu(ino->mtime_nsec);
303 inode->i_ctime.tv_sec = (int64_t)le64_to_cpu(ino->ctime_sec);
304 inode->i_ctime.tv_nsec = le32_to_cpu(ino->ctime_nsec);
305 inode->i_mode = le32_to_cpu(ino->mode);
306 inode->i_size = le64_to_cpu(ino->size);
307
308 ui->data_len = le32_to_cpu(ino->data_len);
309 ui->flags = le32_to_cpu(ino->flags);
310 ui->compr_type = le16_to_cpu(ino->compr_type);
311 ui->creat_sqnum = le64_to_cpu(ino->creat_sqnum);
Heiko Schocherf5895d12014-06-24 10:10:04 +0200312 ui->xattr_cnt = le32_to_cpu(ino->xattr_cnt);
313 ui->xattr_size = le32_to_cpu(ino->xattr_size);
314 ui->xattr_names = le32_to_cpu(ino->xattr_names);
Stefan Roese2fc10f62009-03-19 15:35:05 +0100315 ui->synced_i_size = ui->ui_size = inode->i_size;
316
Heiko Schocherf5895d12014-06-24 10:10:04 +0200317 ui->xattr = (ui->flags & UBIFS_XATTR_FL) ? 1 : 0;
318
Stefan Roese2fc10f62009-03-19 15:35:05 +0100319 err = validate_inode(c, inode);
320 if (err)
321 goto out_invalid;
322
Heiko Schocherf5895d12014-06-24 10:10:04 +0200323#ifndef __UBOOT__
324 /* Disable read-ahead */
325 inode->i_mapping->backing_dev_info = &c->bdi;
326
327 switch (inode->i_mode & S_IFMT) {
328 case S_IFREG:
329 inode->i_mapping->a_ops = &ubifs_file_address_operations;
330 inode->i_op = &ubifs_file_inode_operations;
331 inode->i_fop = &ubifs_file_operations;
332 if (ui->xattr) {
333 ui->data = kmalloc(ui->data_len + 1, GFP_NOFS);
334 if (!ui->data) {
335 err = -ENOMEM;
336 goto out_ino;
337 }
338 memcpy(ui->data, ino->data, ui->data_len);
339 ((char *)ui->data)[ui->data_len] = '\0';
340 } else if (ui->data_len != 0) {
341 err = 10;
342 goto out_invalid;
343 }
344 break;
345 case S_IFDIR:
346 inode->i_op = &ubifs_dir_inode_operations;
347 inode->i_fop = &ubifs_dir_operations;
348 if (ui->data_len != 0) {
349 err = 11;
350 goto out_invalid;
351 }
352 break;
353 case S_IFLNK:
354 inode->i_op = &ubifs_symlink_inode_operations;
355 if (ui->data_len <= 0 || ui->data_len > UBIFS_MAX_INO_DATA) {
356 err = 12;
357 goto out_invalid;
358 }
359 ui->data = kmalloc(ui->data_len + 1, GFP_NOFS);
360 if (!ui->data) {
361 err = -ENOMEM;
362 goto out_ino;
363 }
364 memcpy(ui->data, ino->data, ui->data_len);
365 ((char *)ui->data)[ui->data_len] = '\0';
366 break;
367 case S_IFBLK:
368 case S_IFCHR:
369 {
370 dev_t rdev;
371 union ubifs_dev_desc *dev;
372
373 ui->data = kmalloc(sizeof(union ubifs_dev_desc), GFP_NOFS);
374 if (!ui->data) {
375 err = -ENOMEM;
376 goto out_ino;
377 }
378
379 dev = (union ubifs_dev_desc *)ino->data;
380 if (ui->data_len == sizeof(dev->new))
381 rdev = new_decode_dev(le32_to_cpu(dev->new));
382 else if (ui->data_len == sizeof(dev->huge))
383 rdev = huge_decode_dev(le64_to_cpu(dev->huge));
384 else {
385 err = 13;
386 goto out_invalid;
387 }
388 memcpy(ui->data, ino->data, ui->data_len);
389 inode->i_op = &ubifs_file_inode_operations;
390 init_special_inode(inode, inode->i_mode, rdev);
391 break;
392 }
393 case S_IFSOCK:
394 case S_IFIFO:
395 inode->i_op = &ubifs_file_inode_operations;
396 init_special_inode(inode, inode->i_mode, 0);
397 if (ui->data_len != 0) {
398 err = 14;
399 goto out_invalid;
400 }
401 break;
402 default:
403 err = 15;
404 goto out_invalid;
405 }
406#else
Stefan Roese2fc10f62009-03-19 15:35:05 +0100407 if ((inode->i_mode & S_IFMT) == S_IFLNK) {
408 if (ui->data_len <= 0 || ui->data_len > UBIFS_MAX_INO_DATA) {
409 err = 12;
410 goto out_invalid;
411 }
412 ui->data = kmalloc(ui->data_len + 1, GFP_NOFS);
413 if (!ui->data) {
414 err = -ENOMEM;
415 goto out_ino;
416 }
417 memcpy(ui->data, ino->data, ui->data_len);
418 ((char *)ui->data)[ui->data_len] = '\0';
419 }
Heiko Schocherf5895d12014-06-24 10:10:04 +0200420#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +0100421
422 kfree(ino);
Heiko Schocherf5895d12014-06-24 10:10:04 +0200423#ifndef __UBOOT__
424 ubifs_set_inode_flags(inode);
425#endif
426 unlock_new_inode(inode);
Stefan Roese2fc10f62009-03-19 15:35:05 +0100427 return inode;
428
429out_invalid:
430 ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err);
Heiko Schocherf5895d12014-06-24 10:10:04 +0200431 ubifs_dump_node(c, ino);
432 ubifs_dump_inode(c, inode);
Stefan Roese2fc10f62009-03-19 15:35:05 +0100433 err = -EINVAL;
434out_ino:
435 kfree(ino);
436out:
437 ubifs_err("failed to read inode %lu, error %d", inode->i_ino, err);
Heiko Schocherf5895d12014-06-24 10:10:04 +0200438 iget_failed(inode);
Stefan Roese2fc10f62009-03-19 15:35:05 +0100439 return ERR_PTR(err);
440}
441
Heiko Schocherf5895d12014-06-24 10:10:04 +0200442static struct inode *ubifs_alloc_inode(struct super_block *sb)
Stefan Roese2fc10f62009-03-19 15:35:05 +0100443{
Heiko Schocherf5895d12014-06-24 10:10:04 +0200444 struct ubifs_inode *ui;
Stefan Roese2fc10f62009-03-19 15:35:05 +0100445
Heiko Schocherf5895d12014-06-24 10:10:04 +0200446 ui = kmem_cache_alloc(ubifs_inode_slab, GFP_NOFS);
447 if (!ui)
448 return NULL;
Stefan Roese2fc10f62009-03-19 15:35:05 +0100449
Heiko Schocherf5895d12014-06-24 10:10:04 +0200450 memset((void *)ui + sizeof(struct inode), 0,
451 sizeof(struct ubifs_inode) - sizeof(struct inode));
452 mutex_init(&ui->ui_mutex);
453 spin_lock_init(&ui->ui_lock);
454 return &ui->vfs_inode;
455};
Stefan Roese2fc10f62009-03-19 15:35:05 +0100456
Heiko Schocherf5895d12014-06-24 10:10:04 +0200457#ifndef __UBOOT__
458static void ubifs_i_callback(struct rcu_head *head)
459{
460 struct inode *inode = container_of(head, struct inode, i_rcu);
461 struct ubifs_inode *ui = ubifs_inode(inode);
462 kmem_cache_free(ubifs_inode_slab, ui);
463}
Stefan Roese2fc10f62009-03-19 15:35:05 +0100464
Heiko Schocherf5895d12014-06-24 10:10:04 +0200465static void ubifs_destroy_inode(struct inode *inode)
466{
467 struct ubifs_inode *ui = ubifs_inode(inode);
Stefan Roese2fc10f62009-03-19 15:35:05 +0100468
Heiko Schocherf5895d12014-06-24 10:10:04 +0200469 kfree(ui->data);
470 call_rcu(&inode->i_rcu, ubifs_i_callback);
471}
Stefan Roese2fc10f62009-03-19 15:35:05 +0100472
Heiko Schocherf5895d12014-06-24 10:10:04 +0200473/*
474 * Note, Linux write-back code calls this without 'i_mutex'.
475 */
476static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
477{
478 int err = 0;
479 struct ubifs_info *c = inode->i_sb->s_fs_info;
480 struct ubifs_inode *ui = ubifs_inode(inode);
481
482 ubifs_assert(!ui->xattr);
483 if (is_bad_inode(inode))
484 return 0;
Stefan Roese2fc10f62009-03-19 15:35:05 +0100485
Heiko Schocherf5895d12014-06-24 10:10:04 +0200486 mutex_lock(&ui->ui_mutex);
Stefan Roese2fc10f62009-03-19 15:35:05 +0100487 /*
Heiko Schocherf5895d12014-06-24 10:10:04 +0200488 * Due to races between write-back forced by budgeting
489 * (see 'sync_some_inodes()') and background write-back, the inode may
490 * have already been synchronized, do not do this again. This might
491 * also happen if it was synchronized in an VFS operation, e.g.
492 * 'ubifs_link()'.
Stefan Roese2fc10f62009-03-19 15:35:05 +0100493 */
Heiko Schocherf5895d12014-06-24 10:10:04 +0200494 if (!ui->dirty) {
495 mutex_unlock(&ui->ui_mutex);
496 return 0;
Stefan Roese2fc10f62009-03-19 15:35:05 +0100497 }
498
Stefan Roese2fc10f62009-03-19 15:35:05 +0100499 /*
Heiko Schocherf5895d12014-06-24 10:10:04 +0200500 * As an optimization, do not write orphan inodes to the media just
501 * because this is not needed.
Stefan Roese2fc10f62009-03-19 15:35:05 +0100502 */
Heiko Schocherf5895d12014-06-24 10:10:04 +0200503 dbg_gen("inode %lu, mode %#x, nlink %u",
504 inode->i_ino, (int)inode->i_mode, inode->i_nlink);
505 if (inode->i_nlink) {
506 err = ubifs_jnl_write_inode(c, inode);
507 if (err)
508 ubifs_err("can't write inode %lu, error %d",
509 inode->i_ino, err);
510 else
511 err = dbg_check_inode_size(c, inode, ui->ui_size);
512 }
Stefan Roese2fc10f62009-03-19 15:35:05 +0100513
Heiko Schocherf5895d12014-06-24 10:10:04 +0200514 ui->dirty = 0;
515 mutex_unlock(&ui->ui_mutex);
516 ubifs_release_dirty_inode_budget(c, ui);
517 return err;
518}
519
520static void ubifs_evict_inode(struct inode *inode)
521{
522 int err;
523 struct ubifs_info *c = inode->i_sb->s_fs_info;
524 struct ubifs_inode *ui = ubifs_inode(inode);
525
526 if (ui->xattr)
527 /*
528 * Extended attribute inode deletions are fully handled in
529 * 'ubifs_removexattr()'. These inodes are special and have
530 * limited usage, so there is nothing to do here.
531 */
532 goto out;
533
534 dbg_gen("inode %lu, mode %#x", inode->i_ino, (int)inode->i_mode);
535 ubifs_assert(!atomic_read(&inode->i_count));
536
Heiko Schocher081fe9e2014-07-15 16:08:43 +0200537 truncate_inode_pages_final(&inode->i_data);
Heiko Schocherf5895d12014-06-24 10:10:04 +0200538
539 if (inode->i_nlink)
540 goto done;
541
542 if (is_bad_inode(inode))
543 goto out;
544
545 ui->ui_size = inode->i_size = 0;
546 err = ubifs_jnl_delete_inode(c, inode);
547 if (err)
548 /*
549 * Worst case we have a lost orphan inode wasting space, so a
550 * simple error message is OK here.
551 */
552 ubifs_err("can't delete inode %lu, error %d",
553 inode->i_ino, err);
554
555out:
556 if (ui->dirty)
557 ubifs_release_dirty_inode_budget(c, ui);
558 else {
559 /* We've deleted something - clean the "no space" flags */
560 c->bi.nospace = c->bi.nospace_rp = 0;
561 smp_wmb();
562 }
563done:
564 clear_inode(inode);
565}
566#endif
567
568static void ubifs_dirty_inode(struct inode *inode, int flags)
569{
570 struct ubifs_inode *ui = ubifs_inode(inode);
571
572 ubifs_assert(mutex_is_locked(&ui->ui_mutex));
573 if (!ui->dirty) {
574 ui->dirty = 1;
575 dbg_gen("inode %lu", inode->i_ino);
576 }
577}
578
579#ifndef __UBOOT__
580static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf)
581{
582 struct ubifs_info *c = dentry->d_sb->s_fs_info;
583 unsigned long long free;
584 __le32 *uuid = (__le32 *)c->uuid;
585
586 free = ubifs_get_free_space(c);
587 dbg_gen("free space %lld bytes (%lld blocks)",
588 free, free >> UBIFS_BLOCK_SHIFT);
589
590 buf->f_type = UBIFS_SUPER_MAGIC;
591 buf->f_bsize = UBIFS_BLOCK_SIZE;
592 buf->f_blocks = c->block_cnt;
593 buf->f_bfree = free >> UBIFS_BLOCK_SHIFT;
594 if (free > c->report_rp_size)
595 buf->f_bavail = (free - c->report_rp_size) >> UBIFS_BLOCK_SHIFT;
596 else
597 buf->f_bavail = 0;
598 buf->f_files = 0;
599 buf->f_ffree = 0;
600 buf->f_namelen = UBIFS_MAX_NLEN;
601 buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]);
602 buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]);
603 ubifs_assert(buf->f_bfree <= c->block_cnt);
604 return 0;
605}
606
607static int ubifs_show_options(struct seq_file *s, struct dentry *root)
608{
609 struct ubifs_info *c = root->d_sb->s_fs_info;
610
611 if (c->mount_opts.unmount_mode == 2)
612 seq_printf(s, ",fast_unmount");
613 else if (c->mount_opts.unmount_mode == 1)
614 seq_printf(s, ",norm_unmount");
615
616 if (c->mount_opts.bulk_read == 2)
617 seq_printf(s, ",bulk_read");
618 else if (c->mount_opts.bulk_read == 1)
619 seq_printf(s, ",no_bulk_read");
620
621 if (c->mount_opts.chk_data_crc == 2)
622 seq_printf(s, ",chk_data_crc");
623 else if (c->mount_opts.chk_data_crc == 1)
624 seq_printf(s, ",no_chk_data_crc");
625
626 if (c->mount_opts.override_compr) {
627 seq_printf(s, ",compr=%s",
628 ubifs_compr_name(c->mount_opts.compr_type));
629 }
630
631 return 0;
632}
633
634static int ubifs_sync_fs(struct super_block *sb, int wait)
635{
636 int i, err;
637 struct ubifs_info *c = sb->s_fs_info;
638
639 /*
640 * Zero @wait is just an advisory thing to help the file system shove
641 * lots of data into the queues, and there will be the second
642 * '->sync_fs()' call, with non-zero @wait.
643 */
644 if (!wait)
645 return 0;
646
647 /*
648 * Synchronize write buffers, because 'ubifs_run_commit()' does not
649 * do this if it waits for an already running commit.
650 */
651 for (i = 0; i < c->jhead_cnt; i++) {
652 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
653 if (err)
654 return err;
655 }
656
657 /*
658 * Strictly speaking, it is not necessary to commit the journal here,
659 * synchronizing write-buffers would be enough. But committing makes
660 * UBIFS free space predictions much more accurate, so we want to let
661 * the user be able to get more accurate results of 'statfs()' after
662 * they synchronize the file system.
663 */
664 err = ubifs_run_commit(c);
665 if (err)
666 return err;
667
668 return ubi_sync(c->vi.ubi_num);
669}
670#endif
671
672/**
673 * init_constants_early - initialize UBIFS constants.
674 * @c: UBIFS file-system description object
675 *
676 * This function initialize UBIFS constants which do not need the superblock to
677 * be read. It also checks that the UBI volume satisfies basic UBIFS
678 * requirements. Returns zero in case of success and a negative error code in
679 * case of failure.
680 */
681static int init_constants_early(struct ubifs_info *c)
682{
683 if (c->vi.corrupted) {
684 ubifs_warn("UBI volume is corrupted - read-only mode");
685 c->ro_media = 1;
686 }
687
688 if (c->di.ro_mode) {
689 ubifs_msg("read-only UBI device");
690 c->ro_media = 1;
691 }
692
693 if (c->vi.vol_type == UBI_STATIC_VOLUME) {
694 ubifs_msg("static UBI volume - read-only mode");
695 c->ro_media = 1;
696 }
697
698 c->leb_cnt = c->vi.size;
699 c->leb_size = c->vi.usable_leb_size;
700 c->leb_start = c->di.leb_start;
701 c->half_leb_size = c->leb_size / 2;
702 c->min_io_size = c->di.min_io_size;
703 c->min_io_shift = fls(c->min_io_size) - 1;
704 c->max_write_size = c->di.max_write_size;
705 c->max_write_shift = fls(c->max_write_size) - 1;
706
707 if (c->leb_size < UBIFS_MIN_LEB_SZ) {
708 ubifs_err("too small LEBs (%d bytes), min. is %d bytes",
709 c->leb_size, UBIFS_MIN_LEB_SZ);
710 return -EINVAL;
711 }
712
713 if (c->leb_cnt < UBIFS_MIN_LEB_CNT) {
714 ubifs_err("too few LEBs (%d), min. is %d",
715 c->leb_cnt, UBIFS_MIN_LEB_CNT);
716 return -EINVAL;
717 }
718
719 if (!is_power_of_2(c->min_io_size)) {
720 ubifs_err("bad min. I/O size %d", c->min_io_size);
721 return -EINVAL;
722 }
723
724 /*
725 * Maximum write size has to be greater or equivalent to min. I/O
726 * size, and be multiple of min. I/O size.
727 */
728 if (c->max_write_size < c->min_io_size ||
729 c->max_write_size % c->min_io_size ||
730 !is_power_of_2(c->max_write_size)) {
731 ubifs_err("bad write buffer size %d for %d min. I/O unit",
732 c->max_write_size, c->min_io_size);
733 return -EINVAL;
734 }
735
736 /*
737 * UBIFS aligns all node to 8-byte boundary, so to make function in
738 * io.c simpler, assume minimum I/O unit size to be 8 bytes if it is
739 * less than 8.
740 */
741 if (c->min_io_size < 8) {
742 c->min_io_size = 8;
743 c->min_io_shift = 3;
744 if (c->max_write_size < c->min_io_size) {
745 c->max_write_size = c->min_io_size;
746 c->max_write_shift = c->min_io_shift;
747 }
748 }
749
750 c->ref_node_alsz = ALIGN(UBIFS_REF_NODE_SZ, c->min_io_size);
751 c->mst_node_alsz = ALIGN(UBIFS_MST_NODE_SZ, c->min_io_size);
752
753 /*
754 * Initialize node length ranges which are mostly needed for node
755 * length validation.
756 */
757 c->ranges[UBIFS_PAD_NODE].len = UBIFS_PAD_NODE_SZ;
758 c->ranges[UBIFS_SB_NODE].len = UBIFS_SB_NODE_SZ;
759 c->ranges[UBIFS_MST_NODE].len = UBIFS_MST_NODE_SZ;
760 c->ranges[UBIFS_REF_NODE].len = UBIFS_REF_NODE_SZ;
761 c->ranges[UBIFS_TRUN_NODE].len = UBIFS_TRUN_NODE_SZ;
762 c->ranges[UBIFS_CS_NODE].len = UBIFS_CS_NODE_SZ;
763
764 c->ranges[UBIFS_INO_NODE].min_len = UBIFS_INO_NODE_SZ;
765 c->ranges[UBIFS_INO_NODE].max_len = UBIFS_MAX_INO_NODE_SZ;
766 c->ranges[UBIFS_ORPH_NODE].min_len =
Stefan Roese2fc10f62009-03-19 15:35:05 +0100767 UBIFS_ORPH_NODE_SZ + sizeof(__le64);
768 c->ranges[UBIFS_ORPH_NODE].max_len = c->leb_size;
769 c->ranges[UBIFS_DENT_NODE].min_len = UBIFS_DENT_NODE_SZ;
770 c->ranges[UBIFS_DENT_NODE].max_len = UBIFS_MAX_DENT_NODE_SZ;
771 c->ranges[UBIFS_XENT_NODE].min_len = UBIFS_XENT_NODE_SZ;
772 c->ranges[UBIFS_XENT_NODE].max_len = UBIFS_MAX_XENT_NODE_SZ;
773 c->ranges[UBIFS_DATA_NODE].min_len = UBIFS_DATA_NODE_SZ;
774 c->ranges[UBIFS_DATA_NODE].max_len = UBIFS_MAX_DATA_NODE_SZ;
775 /*
776 * Minimum indexing node size is amended later when superblock is
777 * read and the key length is known.
778 */
779 c->ranges[UBIFS_IDX_NODE].min_len = UBIFS_IDX_NODE_SZ + UBIFS_BRANCH_SZ;
780 /*
781 * Maximum indexing node size is amended later when superblock is
782 * read and the fanout is known.
783 */
784 c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX;
785
786 /*
787 * Initialize dead and dark LEB space watermarks. See gc.c for comments
788 * about these values.
789 */
790 c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size);
791 c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size);
792
793 /*
794 * Calculate how many bytes would be wasted at the end of LEB if it was
795 * fully filled with data nodes of maximum size. This is used in
796 * calculations when reporting free space.
797 */
798 c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
799
Heiko Schocherf5895d12014-06-24 10:10:04 +0200800 /* Buffer size for bulk-reads */
801 c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
802 if (c->max_bu_buf_len > c->leb_size)
803 c->max_bu_buf_len = c->leb_size;
Stefan Roese2fc10f62009-03-19 15:35:05 +0100804 return 0;
805}
806
Heiko Schocherf5895d12014-06-24 10:10:04 +0200807/**
808 * bud_wbuf_callback - bud LEB write-buffer synchronization call-back.
809 * @c: UBIFS file-system description object
810 * @lnum: LEB the write-buffer was synchronized to
811 * @free: how many free bytes left in this LEB
812 * @pad: how many bytes were padded
813 *
814 * This is a callback function which is called by the I/O unit when the
815 * write-buffer is synchronized. We need this to correctly maintain space
816 * accounting in bud logical eraseblocks. This function returns zero in case of
817 * success and a negative error code in case of failure.
818 *
819 * This function actually belongs to the journal, but we keep it here because
820 * we want to keep it static.
821 */
822static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad)
823{
824 return ubifs_update_one_lp(c, lnum, free, pad, 0, 0);
825}
826
Stefan Roese2fc10f62009-03-19 15:35:05 +0100827/*
828 * init_constants_sb - initialize UBIFS constants.
829 * @c: UBIFS file-system description object
830 *
831 * This is a helper function which initializes various UBIFS constants after
832 * the superblock has been read. It also checks various UBIFS parameters and
833 * makes sure they are all right. Returns zero in case of success and a
834 * negative error code in case of failure.
835 */
836static int init_constants_sb(struct ubifs_info *c)
837{
838 int tmp, err;
839 long long tmp64;
840
841 c->main_bytes = (long long)c->main_lebs * c->leb_size;
842 c->max_znode_sz = sizeof(struct ubifs_znode) +
843 c->fanout * sizeof(struct ubifs_zbranch);
844
845 tmp = ubifs_idx_node_sz(c, 1);
846 c->ranges[UBIFS_IDX_NODE].min_len = tmp;
847 c->min_idx_node_sz = ALIGN(tmp, 8);
848
849 tmp = ubifs_idx_node_sz(c, c->fanout);
850 c->ranges[UBIFS_IDX_NODE].max_len = tmp;
851 c->max_idx_node_sz = ALIGN(tmp, 8);
852
853 /* Make sure LEB size is large enough to fit full commit */
854 tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt;
855 tmp = ALIGN(tmp, c->min_io_size);
856 if (tmp > c->leb_size) {
Heiko Schocherf5895d12014-06-24 10:10:04 +0200857 ubifs_err("too small LEB size %d, at least %d needed",
858 c->leb_size, tmp);
Stefan Roese2fc10f62009-03-19 15:35:05 +0100859 return -EINVAL;
860 }
861
862 /*
863 * Make sure that the log is large enough to fit reference nodes for
864 * all buds plus one reserved LEB.
865 */
866 tmp64 = c->max_bud_bytes + c->leb_size - 1;
867 c->max_bud_cnt = div_u64(tmp64, c->leb_size);
868 tmp = (c->ref_node_alsz * c->max_bud_cnt + c->leb_size - 1);
869 tmp /= c->leb_size;
870 tmp += 1;
871 if (c->log_lebs < tmp) {
Heiko Schocherf5895d12014-06-24 10:10:04 +0200872 ubifs_err("too small log %d LEBs, required min. %d LEBs",
873 c->log_lebs, tmp);
Stefan Roese2fc10f62009-03-19 15:35:05 +0100874 return -EINVAL;
875 }
876
877 /*
878 * When budgeting we assume worst-case scenarios when the pages are not
879 * be compressed and direntries are of the maximum size.
880 *
881 * Note, data, which may be stored in inodes is budgeted separately, so
Heiko Schocherf5895d12014-06-24 10:10:04 +0200882 * it is not included into 'c->bi.inode_budget'.
Stefan Roese2fc10f62009-03-19 15:35:05 +0100883 */
Heiko Schocherf5895d12014-06-24 10:10:04 +0200884 c->bi.page_budget = UBIFS_MAX_DATA_NODE_SZ * UBIFS_BLOCKS_PER_PAGE;
885 c->bi.inode_budget = UBIFS_INO_NODE_SZ;
886 c->bi.dent_budget = UBIFS_MAX_DENT_NODE_SZ;
Stefan Roese2fc10f62009-03-19 15:35:05 +0100887
888 /*
889 * When the amount of flash space used by buds becomes
890 * 'c->max_bud_bytes', UBIFS just blocks all writers and starts commit.
891 * The writers are unblocked when the commit is finished. To avoid
892 * writers to be blocked UBIFS initiates background commit in advance,
893 * when number of bud bytes becomes above the limit defined below.
894 */
895 c->bg_bud_bytes = (c->max_bud_bytes * 13) >> 4;
896
897 /*
898 * Ensure minimum journal size. All the bytes in the journal heads are
899 * considered to be used, when calculating the current journal usage.
900 * Consequently, if the journal is too small, UBIFS will treat it as
901 * always full.
902 */
903 tmp64 = (long long)(c->jhead_cnt + 1) * c->leb_size + 1;
904 if (c->bg_bud_bytes < tmp64)
905 c->bg_bud_bytes = tmp64;
906 if (c->max_bud_bytes < tmp64 + c->leb_size)
907 c->max_bud_bytes = tmp64 + c->leb_size;
908
909 err = ubifs_calc_lpt_geom(c);
910 if (err)
911 return err;
912
Heiko Schocherf5895d12014-06-24 10:10:04 +0200913 /* Initialize effective LEB size used in budgeting calculations */
914 c->idx_leb_size = c->leb_size - c->max_idx_node_sz;
Stefan Roese2fc10f62009-03-19 15:35:05 +0100915 return 0;
916}
917
918/*
919 * init_constants_master - initialize UBIFS constants.
920 * @c: UBIFS file-system description object
921 *
922 * This is a helper function which initializes various UBIFS constants after
923 * the master node has been read. It also checks various UBIFS parameters and
924 * makes sure they are all right.
925 */
926static void init_constants_master(struct ubifs_info *c)
927{
928 long long tmp64;
929
Heiko Schocherf5895d12014-06-24 10:10:04 +0200930 c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
931 c->report_rp_size = ubifs_reported_space(c, c->rp_size);
Stefan Roese2fc10f62009-03-19 15:35:05 +0100932
933 /*
934 * Calculate total amount of FS blocks. This number is not used
935 * internally because it does not make much sense for UBIFS, but it is
936 * necessary to report something for the 'statfs()' call.
937 *
938 * Subtract the LEB reserved for GC, the LEB which is reserved for
939 * deletions, minimum LEBs for the index, and assume only one journal
940 * head is available.
941 */
942 tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1;
943 tmp64 *= (long long)c->leb_size - c->leb_overhead;
944 tmp64 = ubifs_reported_space(c, tmp64);
945 c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT;
946}
947
948/**
Heiko Schocherf5895d12014-06-24 10:10:04 +0200949 * take_gc_lnum - reserve GC LEB.
950 * @c: UBIFS file-system description object
951 *
952 * This function ensures that the LEB reserved for garbage collection is marked
953 * as "taken" in lprops. We also have to set free space to LEB size and dirty
954 * space to zero, because lprops may contain out-of-date information if the
955 * file-system was un-mounted before it has been committed. This function
956 * returns zero in case of success and a negative error code in case of
957 * failure.
958 */
959static int take_gc_lnum(struct ubifs_info *c)
960{
961 int err;
962
963 if (c->gc_lnum == -1) {
964 ubifs_err("no LEB for GC");
965 return -EINVAL;
966 }
967
968 /* And we have to tell lprops that this LEB is taken */
969 err = ubifs_change_one_lp(c, c->gc_lnum, c->leb_size, 0,
970 LPROPS_TAKEN, 0, 0);
971 return err;
972}
973
974/**
975 * alloc_wbufs - allocate write-buffers.
976 * @c: UBIFS file-system description object
977 *
978 * This helper function allocates and initializes UBIFS write-buffers. Returns
979 * zero in case of success and %-ENOMEM in case of failure.
980 */
981static int alloc_wbufs(struct ubifs_info *c)
982{
983 int i, err;
984
985 c->jheads = kzalloc(c->jhead_cnt * sizeof(struct ubifs_jhead),
986 GFP_KERNEL);
987 if (!c->jheads)
988 return -ENOMEM;
989
990 /* Initialize journal heads */
991 for (i = 0; i < c->jhead_cnt; i++) {
992 INIT_LIST_HEAD(&c->jheads[i].buds_list);
993 err = ubifs_wbuf_init(c, &c->jheads[i].wbuf);
994 if (err)
995 return err;
996
997 c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
998 c->jheads[i].wbuf.jhead = i;
999 c->jheads[i].grouped = 1;
1000 }
1001
1002 /*
1003 * Garbage Collector head does not need to be synchronized by timer.
1004 * Also GC head nodes are not grouped.
1005 */
1006 c->jheads[GCHD].wbuf.no_timer = 1;
1007 c->jheads[GCHD].grouped = 0;
1008
1009 return 0;
1010}
1011
1012/**
1013 * free_wbufs - free write-buffers.
1014 * @c: UBIFS file-system description object
1015 */
1016static void free_wbufs(struct ubifs_info *c)
1017{
1018 int i;
1019
1020 if (c->jheads) {
1021 for (i = 0; i < c->jhead_cnt; i++) {
1022 kfree(c->jheads[i].wbuf.buf);
1023 kfree(c->jheads[i].wbuf.inodes);
1024 }
1025 kfree(c->jheads);
1026 c->jheads = NULL;
1027 }
1028}
1029
1030/**
Stefan Roese2fc10f62009-03-19 15:35:05 +01001031 * free_orphans - free orphans.
1032 * @c: UBIFS file-system description object
1033 */
1034static void free_orphans(struct ubifs_info *c)
1035{
1036 struct ubifs_orphan *orph;
1037
1038 while (c->orph_dnext) {
1039 orph = c->orph_dnext;
1040 c->orph_dnext = orph->dnext;
1041 list_del(&orph->list);
1042 kfree(orph);
1043 }
1044
1045 while (!list_empty(&c->orph_list)) {
1046 orph = list_entry(c->orph_list.next, struct ubifs_orphan, list);
1047 list_del(&orph->list);
1048 kfree(orph);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001049 ubifs_err("orphan list not empty at unmount");
Stefan Roese2fc10f62009-03-19 15:35:05 +01001050 }
1051
1052 vfree(c->orph_buf);
1053 c->orph_buf = NULL;
1054}
Heiko Schocherf5895d12014-06-24 10:10:04 +02001055
Heiko Schocherf5895d12014-06-24 10:10:04 +02001056/**
1057 * free_buds - free per-bud objects.
1058 * @c: UBIFS file-system description object
1059 */
1060static void free_buds(struct ubifs_info *c)
1061{
1062 struct ubifs_bud *bud, *n;
1063
1064 rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb)
1065 kfree(bud);
1066}
Stefan Roese2fc10f62009-03-19 15:35:05 +01001067
1068/**
1069 * check_volume_empty - check if the UBI volume is empty.
1070 * @c: UBIFS file-system description object
1071 *
1072 * This function checks if the UBIFS volume is empty by looking if its LEBs are
1073 * mapped or not. The result of checking is stored in the @c->empty variable.
1074 * Returns zero in case of success and a negative error code in case of
1075 * failure.
1076 */
1077static int check_volume_empty(struct ubifs_info *c)
1078{
1079 int lnum, err;
1080
1081 c->empty = 1;
1082 for (lnum = 0; lnum < c->leb_cnt; lnum++) {
Heiko Schocherf5895d12014-06-24 10:10:04 +02001083 err = ubifs_is_mapped(c, lnum);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001084 if (unlikely(err < 0))
1085 return err;
1086 if (err == 1) {
1087 c->empty = 0;
1088 break;
1089 }
1090
1091 cond_resched();
1092 }
1093
1094 return 0;
1095}
1096
Heiko Schocherf5895d12014-06-24 10:10:04 +02001097/*
1098 * UBIFS mount options.
1099 *
1100 * Opt_fast_unmount: do not run a journal commit before un-mounting
1101 * Opt_norm_unmount: run a journal commit before un-mounting
1102 * Opt_bulk_read: enable bulk-reads
1103 * Opt_no_bulk_read: disable bulk-reads
1104 * Opt_chk_data_crc: check CRCs when reading data nodes
1105 * Opt_no_chk_data_crc: do not check CRCs when reading data nodes
1106 * Opt_override_compr: override default compressor
1107 * Opt_err: just end of array marker
1108 */
1109enum {
1110 Opt_fast_unmount,
1111 Opt_norm_unmount,
1112 Opt_bulk_read,
1113 Opt_no_bulk_read,
1114 Opt_chk_data_crc,
1115 Opt_no_chk_data_crc,
1116 Opt_override_compr,
1117 Opt_err,
1118};
1119
1120#ifndef __UBOOT__
1121static const match_table_t tokens = {
1122 {Opt_fast_unmount, "fast_unmount"},
1123 {Opt_norm_unmount, "norm_unmount"},
1124 {Opt_bulk_read, "bulk_read"},
1125 {Opt_no_bulk_read, "no_bulk_read"},
1126 {Opt_chk_data_crc, "chk_data_crc"},
1127 {Opt_no_chk_data_crc, "no_chk_data_crc"},
1128 {Opt_override_compr, "compr=%s"},
1129 {Opt_err, NULL},
1130};
1131
1132/**
1133 * parse_standard_option - parse a standard mount option.
1134 * @option: the option to parse
1135 *
1136 * Normally, standard mount options like "sync" are passed to file-systems as
1137 * flags. However, when a "rootflags=" kernel boot parameter is used, they may
1138 * be present in the options string. This function tries to deal with this
1139 * situation and parse standard options. Returns 0 if the option was not
1140 * recognized, and the corresponding integer flag if it was.
1141 *
1142 * UBIFS is only interested in the "sync" option, so do not check for anything
1143 * else.
1144 */
1145static int parse_standard_option(const char *option)
1146{
1147 ubifs_msg("parse %s", option);
1148 if (!strcmp(option, "sync"))
1149 return MS_SYNCHRONOUS;
1150 return 0;
1151}
1152
1153/**
1154 * ubifs_parse_options - parse mount parameters.
1155 * @c: UBIFS file-system description object
1156 * @options: parameters to parse
1157 * @is_remount: non-zero if this is FS re-mount
1158 *
1159 * This function parses UBIFS mount options and returns zero in case success
1160 * and a negative error code in case of failure.
1161 */
1162static int ubifs_parse_options(struct ubifs_info *c, char *options,
1163 int is_remount)
1164{
1165 char *p;
1166 substring_t args[MAX_OPT_ARGS];
1167
1168 if (!options)
1169 return 0;
1170
1171 while ((p = strsep(&options, ","))) {
1172 int token;
1173
1174 if (!*p)
1175 continue;
1176
1177 token = match_token(p, tokens, args);
1178 switch (token) {
1179 /*
1180 * %Opt_fast_unmount and %Opt_norm_unmount options are ignored.
1181 * We accept them in order to be backward-compatible. But this
1182 * should be removed at some point.
1183 */
1184 case Opt_fast_unmount:
1185 c->mount_opts.unmount_mode = 2;
1186 break;
1187 case Opt_norm_unmount:
1188 c->mount_opts.unmount_mode = 1;
1189 break;
1190 case Opt_bulk_read:
1191 c->mount_opts.bulk_read = 2;
1192 c->bulk_read = 1;
1193 break;
1194 case Opt_no_bulk_read:
1195 c->mount_opts.bulk_read = 1;
1196 c->bulk_read = 0;
1197 break;
1198 case Opt_chk_data_crc:
1199 c->mount_opts.chk_data_crc = 2;
1200 c->no_chk_data_crc = 0;
1201 break;
1202 case Opt_no_chk_data_crc:
1203 c->mount_opts.chk_data_crc = 1;
1204 c->no_chk_data_crc = 1;
1205 break;
1206 case Opt_override_compr:
1207 {
1208 char *name = match_strdup(&args[0]);
1209
1210 if (!name)
1211 return -ENOMEM;
1212 if (!strcmp(name, "none"))
1213 c->mount_opts.compr_type = UBIFS_COMPR_NONE;
1214 else if (!strcmp(name, "lzo"))
1215 c->mount_opts.compr_type = UBIFS_COMPR_LZO;
1216 else if (!strcmp(name, "zlib"))
1217 c->mount_opts.compr_type = UBIFS_COMPR_ZLIB;
1218 else {
1219 ubifs_err("unknown compressor \"%s\"", name);
1220 kfree(name);
1221 return -EINVAL;
1222 }
1223 kfree(name);
1224 c->mount_opts.override_compr = 1;
1225 c->default_compr = c->mount_opts.compr_type;
1226 break;
1227 }
1228 default:
1229 {
1230 unsigned long flag;
1231 struct super_block *sb = c->vfs_sb;
1232
1233 flag = parse_standard_option(p);
1234 if (!flag) {
1235 ubifs_err("unrecognized mount option \"%s\" or missing value",
1236 p);
1237 return -EINVAL;
1238 }
1239 sb->s_flags |= flag;
1240 break;
1241 }
1242 }
1243 }
1244
1245 return 0;
1246}
Anton Habegger7c470312015-01-22 22:29:11 +01001247#endif
Heiko Schocherf5895d12014-06-24 10:10:04 +02001248
Stefan Roese2fc10f62009-03-19 15:35:05 +01001249/**
Heiko Schocherf5895d12014-06-24 10:10:04 +02001250 * destroy_journal - destroy journal data structures.
1251 * @c: UBIFS file-system description object
1252 *
1253 * This function destroys journal data structures including those that may have
1254 * been created by recovery functions.
1255 */
1256static void destroy_journal(struct ubifs_info *c)
1257{
1258 while (!list_empty(&c->unclean_leb_list)) {
1259 struct ubifs_unclean_leb *ucleb;
1260
1261 ucleb = list_entry(c->unclean_leb_list.next,
1262 struct ubifs_unclean_leb, list);
1263 list_del(&ucleb->list);
1264 kfree(ucleb);
1265 }
1266 while (!list_empty(&c->old_buds)) {
1267 struct ubifs_bud *bud;
1268
1269 bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
1270 list_del(&bud->list);
1271 kfree(bud);
1272 }
1273 ubifs_destroy_idx_gc(c);
1274 ubifs_destroy_size_tree(c);
1275 ubifs_tnc_close(c);
1276 free_buds(c);
1277}
Heiko Schocherf5895d12014-06-24 10:10:04 +02001278
1279/**
1280 * bu_init - initialize bulk-read information.
1281 * @c: UBIFS file-system description object
1282 */
1283static void bu_init(struct ubifs_info *c)
1284{
1285 ubifs_assert(c->bulk_read == 1);
1286
1287 if (c->bu.buf)
1288 return; /* Already initialized */
1289
1290again:
1291 c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
1292 if (!c->bu.buf) {
1293 if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
1294 c->max_bu_buf_len = UBIFS_KMALLOC_OK;
1295 goto again;
1296 }
1297
1298 /* Just disable bulk-read */
1299 ubifs_warn("cannot allocate %d bytes of memory for bulk-read, disabling it",
1300 c->max_bu_buf_len);
1301 c->mount_opts.bulk_read = 1;
1302 c->bulk_read = 0;
1303 return;
1304 }
1305}
1306
1307#ifndef __UBOOT__
1308/**
1309 * check_free_space - check if there is enough free space to mount.
1310 * @c: UBIFS file-system description object
1311 *
1312 * This function makes sure UBIFS has enough free space to be mounted in
1313 * read/write mode. UBIFS must always have some free space to allow deletions.
1314 */
1315static int check_free_space(struct ubifs_info *c)
1316{
1317 ubifs_assert(c->dark_wm > 0);
1318 if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) {
1319 ubifs_err("insufficient free space to mount in R/W mode");
1320 ubifs_dump_budg(c, &c->bi);
1321 ubifs_dump_lprops(c);
1322 return -ENOSPC;
1323 }
1324 return 0;
1325}
1326#endif
1327
1328/**
Stefan Roese2fc10f62009-03-19 15:35:05 +01001329 * mount_ubifs - mount UBIFS file-system.
1330 * @c: UBIFS file-system description object
1331 *
1332 * This function mounts UBIFS file system. Returns zero in case of success and
1333 * a negative error code in case of failure.
Stefan Roese2fc10f62009-03-19 15:35:05 +01001334 */
1335static int mount_ubifs(struct ubifs_info *c)
1336{
Heiko Schocherf5895d12014-06-24 10:10:04 +02001337 int err;
1338 long long x, y;
Stefan Roese2fc10f62009-03-19 15:35:05 +01001339 size_t sz;
1340
Heiko Schocherf5895d12014-06-24 10:10:04 +02001341 c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY);
1342#ifdef __UBOOT__
1343 if (!c->ro_mount) {
1344 printf("UBIFS: only ro mode in U-Boot allowed.\n");
1345 return -EACCES;
1346 }
1347#endif
1348
Stefan Roese2fc10f62009-03-19 15:35:05 +01001349 err = init_constants_early(c);
1350 if (err)
1351 return err;
1352
1353 err = ubifs_debugging_init(c);
1354 if (err)
1355 return err;
1356
1357 err = check_volume_empty(c);
1358 if (err)
1359 goto out_free;
1360
Heiko Schocherf5895d12014-06-24 10:10:04 +02001361 if (c->empty && (c->ro_mount || c->ro_media)) {
Stefan Roese2fc10f62009-03-19 15:35:05 +01001362 /*
1363 * This UBI volume is empty, and read-only, or the file system
1364 * is mounted read-only - we cannot format it.
1365 */
1366 ubifs_err("can't format empty UBI volume: read-only %s",
1367 c->ro_media ? "UBI volume" : "mount");
1368 err = -EROFS;
1369 goto out_free;
1370 }
1371
Heiko Schocherf5895d12014-06-24 10:10:04 +02001372 if (c->ro_media && !c->ro_mount) {
Stefan Roese2fc10f62009-03-19 15:35:05 +01001373 ubifs_err("cannot mount read-write - read-only media");
1374 err = -EROFS;
1375 goto out_free;
1376 }
1377
1378 /*
1379 * The requirement for the buffer is that it should fit indexing B-tree
1380 * height amount of integers. We assume the height if the TNC tree will
1381 * never exceed 64.
1382 */
1383 err = -ENOMEM;
1384 c->bottom_up_buf = kmalloc(BOTTOM_UP_HEIGHT * sizeof(int), GFP_KERNEL);
1385 if (!c->bottom_up_buf)
1386 goto out_free;
1387
1388 c->sbuf = vmalloc(c->leb_size);
1389 if (!c->sbuf)
1390 goto out_free;
1391
Heiko Schocherf5895d12014-06-24 10:10:04 +02001392#ifndef __UBOOT__
1393 if (!c->ro_mount) {
1394 c->ileb_buf = vmalloc(c->leb_size);
1395 if (!c->ileb_buf)
1396 goto out_free;
1397 }
1398#endif
1399
1400 if (c->bulk_read == 1)
1401 bu_init(c);
1402
1403#ifndef __UBOOT__
1404 if (!c->ro_mount) {
1405 c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ,
1406 GFP_KERNEL);
1407 if (!c->write_reserve_buf)
1408 goto out_free;
1409 }
1410#endif
1411
1412 c->mounting = 1;
Stefan Roese2fc10f62009-03-19 15:35:05 +01001413
1414 err = ubifs_read_superblock(c);
1415 if (err)
1416 goto out_free;
1417
1418 /*
1419 * Make sure the compressor which is set as default in the superblock
1420 * or overridden by mount options is actually compiled in.
1421 */
1422 if (!ubifs_compr_present(c->default_compr)) {
1423 ubifs_err("'compressor \"%s\" is not compiled in",
1424 ubifs_compr_name(c->default_compr));
Heiko Schocherf5895d12014-06-24 10:10:04 +02001425 err = -ENOTSUPP;
Stefan Roese2fc10f62009-03-19 15:35:05 +01001426 goto out_free;
1427 }
1428
Stefan Roese2fc10f62009-03-19 15:35:05 +01001429 err = init_constants_sb(c);
1430 if (err)
1431 goto out_free;
1432
1433 sz = ALIGN(c->max_idx_node_sz, c->min_io_size);
1434 sz = ALIGN(sz + c->max_idx_node_sz, c->min_io_size);
1435 c->cbuf = kmalloc(sz, GFP_NOFS);
1436 if (!c->cbuf) {
1437 err = -ENOMEM;
1438 goto out_free;
1439 }
1440
Heiko Schocherf5895d12014-06-24 10:10:04 +02001441 err = alloc_wbufs(c);
1442 if (err)
1443 goto out_cbuf;
1444
Stefan Roese2fc10f62009-03-19 15:35:05 +01001445 sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001446#ifndef __UBOOT__
1447 if (!c->ro_mount) {
1448 /* Create background thread */
1449 c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name);
1450 if (IS_ERR(c->bgt)) {
1451 err = PTR_ERR(c->bgt);
1452 c->bgt = NULL;
1453 ubifs_err("cannot spawn \"%s\", error %d",
1454 c->bgt_name, err);
1455 goto out_wbufs;
1456 }
1457 wake_up_process(c->bgt);
1458 }
1459#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01001460
1461 err = ubifs_read_master(c);
1462 if (err)
1463 goto out_master;
1464
1465 init_constants_master(c);
1466
1467 if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
1468 ubifs_msg("recovery needed");
1469 c->need_recovery = 1;
1470 }
1471
Heiko Schocherf5895d12014-06-24 10:10:04 +02001472#ifndef __UBOOT__
1473 if (c->need_recovery && !c->ro_mount) {
1474 err = ubifs_recover_inl_heads(c, c->sbuf);
1475 if (err)
1476 goto out_master;
1477 }
1478#endif
1479
1480 err = ubifs_lpt_init(c, 1, !c->ro_mount);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001481 if (err)
Heiko Schocherf5895d12014-06-24 10:10:04 +02001482 goto out_master;
1483
1484#ifndef __UBOOT__
1485 if (!c->ro_mount && c->space_fixup) {
1486 err = ubifs_fixup_free_space(c);
1487 if (err)
1488 goto out_lpt;
1489 }
1490
1491 if (!c->ro_mount) {
1492 /*
1493 * Set the "dirty" flag so that if we reboot uncleanly we
1494 * will notice this immediately on the next mount.
1495 */
1496 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
1497 err = ubifs_write_master(c);
1498 if (err)
1499 goto out_lpt;
1500 }
1501#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01001502
Heiko Schocherf5895d12014-06-24 10:10:04 +02001503 err = dbg_check_idx_size(c, c->bi.old_idx_sz);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001504 if (err)
1505 goto out_lpt;
1506
1507 err = ubifs_replay_journal(c);
1508 if (err)
1509 goto out_journal;
1510
Heiko Schocherf5895d12014-06-24 10:10:04 +02001511 /* Calculate 'min_idx_lebs' after journal replay */
1512 c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
1513
1514 err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001515 if (err)
1516 goto out_orphans;
1517
Heiko Schocherf5895d12014-06-24 10:10:04 +02001518 if (!c->ro_mount) {
1519#ifndef __UBOOT__
1520 int lnum;
1521
1522 err = check_free_space(c);
1523 if (err)
1524 goto out_orphans;
1525
1526 /* Check for enough log space */
1527 lnum = c->lhead_lnum + 1;
1528 if (lnum >= UBIFS_LOG_LNUM + c->log_lebs)
1529 lnum = UBIFS_LOG_LNUM;
1530 if (lnum == c->ltail_lnum) {
1531 err = ubifs_consolidate_log(c);
1532 if (err)
1533 goto out_orphans;
1534 }
1535
1536 if (c->need_recovery) {
1537 err = ubifs_recover_size(c);
1538 if (err)
1539 goto out_orphans;
1540 err = ubifs_rcvry_gc_commit(c);
1541 if (err)
1542 goto out_orphans;
1543 } else {
1544 err = take_gc_lnum(c);
1545 if (err)
1546 goto out_orphans;
1547
1548 /*
1549 * GC LEB may contain garbage if there was an unclean
1550 * reboot, and it should be un-mapped.
1551 */
1552 err = ubifs_leb_unmap(c, c->gc_lnum);
1553 if (err)
1554 goto out_orphans;
1555 }
1556
1557 err = dbg_check_lprops(c);
1558 if (err)
1559 goto out_orphans;
1560#endif
1561 } else if (c->need_recovery) {
Stefan Roese2fc10f62009-03-19 15:35:05 +01001562 err = ubifs_recover_size(c);
1563 if (err)
1564 goto out_orphans;
Heiko Schocherf5895d12014-06-24 10:10:04 +02001565 } else {
1566 /*
1567 * Even if we mount read-only, we have to set space in GC LEB
1568 * to proper value because this affects UBIFS free space
1569 * reporting. We do not want to have a situation when
1570 * re-mounting from R/O to R/W changes amount of free space.
1571 */
1572 err = take_gc_lnum(c);
1573 if (err)
1574 goto out_orphans;
Stefan Roese2fc10f62009-03-19 15:35:05 +01001575 }
1576
Heiko Schocherf5895d12014-06-24 10:10:04 +02001577#ifndef __UBOOT__
Stefan Roese2fc10f62009-03-19 15:35:05 +01001578 spin_lock(&ubifs_infos_lock);
1579 list_add_tail(&c->infos_list, &ubifs_infos);
1580 spin_unlock(&ubifs_infos_lock);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001581#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01001582
1583 if (c->need_recovery) {
Heiko Schocherf5895d12014-06-24 10:10:04 +02001584 if (c->ro_mount)
Stefan Roese2fc10f62009-03-19 15:35:05 +01001585 ubifs_msg("recovery deferred");
1586 else {
1587 c->need_recovery = 0;
1588 ubifs_msg("recovery completed");
Heiko Schocherf5895d12014-06-24 10:10:04 +02001589 /*
1590 * GC LEB has to be empty and taken at this point. But
1591 * the journal head LEBs may also be accounted as
1592 * "empty taken" if they are empty.
1593 */
1594 ubifs_assert(c->lst.taken_empty_lebs > 0);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001595 }
Heiko Schocherf5895d12014-06-24 10:10:04 +02001596 } else
1597 ubifs_assert(c->lst.taken_empty_lebs > 0);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001598
1599 err = dbg_check_filesystem(c);
1600 if (err)
1601 goto out_infos;
1602
Heiko Schocherf5895d12014-06-24 10:10:04 +02001603 err = dbg_debugfs_init_fs(c);
1604 if (err)
1605 goto out_infos;
1606
1607 c->mounting = 0;
Stefan Roese2fc10f62009-03-19 15:35:05 +01001608
Heiko Schocherf5895d12014-06-24 10:10:04 +02001609 ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"%s",
1610 c->vi.ubi_num, c->vi.vol_id, c->vi.name,
1611 c->ro_mount ? ", R/O mode" : "");
Stefan Roese2fc10f62009-03-19 15:35:05 +01001612 x = (long long)c->main_lebs * c->leb_size;
Heiko Schocherf5895d12014-06-24 10:10:04 +02001613 y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
1614 ubifs_msg("LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes",
1615 c->leb_size, c->leb_size >> 10, c->min_io_size,
1616 c->max_write_size);
1617 ubifs_msg("FS size: %lld bytes (%lld MiB, %d LEBs), journal size %lld bytes (%lld MiB, %d LEBs)",
1618 x, x >> 20, c->main_lebs,
1619 y, y >> 20, c->log_lebs + c->max_bud_cnt);
1620 ubifs_msg("reserved for root: %llu bytes (%llu KiB)",
1621 c->report_rp_size, c->report_rp_size >> 10);
1622 ubifs_msg("media format: w%d/r%d (latest is w%d/r%d), UUID %pUB%s",
Artem Bityutskiy619697a2009-03-27 10:21:14 +01001623 c->fmt_version, c->ro_compat_version,
Heiko Schocherf5895d12014-06-24 10:10:04 +02001624 UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid,
1625 c->big_lpt ? ", big LPT model" : ", small LPT model");
Stefan Roese2fc10f62009-03-19 15:35:05 +01001626
Heiko Schocherf5895d12014-06-24 10:10:04 +02001627 dbg_gen("default compressor: %s", ubifs_compr_name(c->default_compr));
1628 dbg_gen("data journal heads: %d",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001629 c->jhead_cnt - NONDATA_JHEADS_CNT);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001630 dbg_gen("log LEBs: %d (%d - %d)",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001631 c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001632 dbg_gen("LPT area LEBs: %d (%d - %d)",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001633 c->lpt_lebs, c->lpt_first, c->lpt_last);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001634 dbg_gen("orphan area LEBs: %d (%d - %d)",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001635 c->orph_lebs, c->orph_first, c->orph_last);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001636 dbg_gen("main area LEBs: %d (%d - %d)",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001637 c->main_lebs, c->main_first, c->leb_cnt - 1);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001638 dbg_gen("index LEBs: %d", c->lst.idx_lebs);
1639 dbg_gen("total index bytes: %lld (%lld KiB, %lld MiB)",
1640 c->bi.old_idx_sz, c->bi.old_idx_sz >> 10,
1641 c->bi.old_idx_sz >> 20);
1642 dbg_gen("key hash type: %d", c->key_hash_type);
1643 dbg_gen("tree fanout: %d", c->fanout);
1644 dbg_gen("reserved GC LEB: %d", c->gc_lnum);
1645 dbg_gen("max. znode size %d", c->max_znode_sz);
1646 dbg_gen("max. index node size %d", c->max_idx_node_sz);
1647 dbg_gen("node sizes: data %zu, inode %zu, dentry %zu",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001648 UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001649 dbg_gen("node sizes: trun %zu, sb %zu, master %zu",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001650 UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001651 dbg_gen("node sizes: ref %zu, cmt. start %zu, orph %zu",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001652 UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001653 dbg_gen("max. node sizes: data %zu, inode %zu dentry %zu, idx %d",
Wolfgang Denkec7fbf52013-10-04 17:43:24 +02001654 UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ,
Heiko Schocherf5895d12014-06-24 10:10:04 +02001655 UBIFS_MAX_DENT_NODE_SZ, ubifs_idx_node_sz(c, c->fanout));
1656 dbg_gen("dead watermark: %d", c->dead_wm);
1657 dbg_gen("dark watermark: %d", c->dark_wm);
1658 dbg_gen("LEB overhead: %d", c->leb_overhead);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001659 x = (long long)c->main_lebs * c->dark_wm;
Heiko Schocherf5895d12014-06-24 10:10:04 +02001660 dbg_gen("max. dark space: %lld (%lld KiB, %lld MiB)",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001661 x, x >> 10, x >> 20);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001662 dbg_gen("maximum bud bytes: %lld (%lld KiB, %lld MiB)",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001663 c->max_bud_bytes, c->max_bud_bytes >> 10,
1664 c->max_bud_bytes >> 20);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001665 dbg_gen("BG commit bud bytes: %lld (%lld KiB, %lld MiB)",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001666 c->bg_bud_bytes, c->bg_bud_bytes >> 10,
1667 c->bg_bud_bytes >> 20);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001668 dbg_gen("current bud bytes %lld (%lld KiB, %lld MiB)",
Stefan Roese2fc10f62009-03-19 15:35:05 +01001669 c->bud_bytes, c->bud_bytes >> 10, c->bud_bytes >> 20);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001670 dbg_gen("max. seq. number: %llu", c->max_sqnum);
1671 dbg_gen("commit number: %llu", c->cmt_no);
1672
1673 return 0;
1674
1675out_infos:
1676 spin_lock(&ubifs_infos_lock);
1677 list_del(&c->infos_list);
1678 spin_unlock(&ubifs_infos_lock);
1679out_orphans:
1680 free_orphans(c);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001681out_journal:
1682 destroy_journal(c);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001683out_lpt:
1684 ubifs_lpt_free(c, 0);
1685out_master:
1686 kfree(c->mst_node);
1687 kfree(c->rcvrd_mst_node);
1688 if (c->bgt)
1689 kthread_stop(c->bgt);
1690#ifndef __UBOOT__
1691out_wbufs:
1692#endif
1693 free_wbufs(c);
1694out_cbuf:
1695 kfree(c->cbuf);
1696out_free:
1697 kfree(c->write_reserve_buf);
1698 kfree(c->bu.buf);
1699 vfree(c->ileb_buf);
1700 vfree(c->sbuf);
1701 kfree(c->bottom_up_buf);
1702 ubifs_debugging_exit(c);
1703 return err;
1704}
1705
1706/**
1707 * ubifs_umount - un-mount UBIFS file-system.
1708 * @c: UBIFS file-system description object
1709 *
1710 * Note, this function is called to free allocated resources when un-mounting,
1711 * as well as to free resources when an error occurred while we were half way
1712 * through mounting (error path cleanup function). So it has to make sure the
1713 * resource was actually allocated before freeing it.
1714 */
1715#ifndef __UBOOT__
1716static void ubifs_umount(struct ubifs_info *c)
1717#else
1718void ubifs_umount(struct ubifs_info *c)
1719#endif
1720{
1721 dbg_gen("un-mounting UBI device %d, volume %d", c->vi.ubi_num,
1722 c->vi.vol_id);
1723
1724 dbg_debugfs_exit_fs(c);
1725 spin_lock(&ubifs_infos_lock);
1726 list_del(&c->infos_list);
1727 spin_unlock(&ubifs_infos_lock);
1728
1729#ifndef __UBOOT__
1730 if (c->bgt)
1731 kthread_stop(c->bgt);
1732
1733 destroy_journal(c);
1734#endif
1735 free_wbufs(c);
1736 free_orphans(c);
1737 ubifs_lpt_free(c, 0);
1738
1739 kfree(c->cbuf);
1740 kfree(c->rcvrd_mst_node);
1741 kfree(c->mst_node);
1742 kfree(c->write_reserve_buf);
1743 kfree(c->bu.buf);
1744 vfree(c->ileb_buf);
1745 vfree(c->sbuf);
1746 kfree(c->bottom_up_buf);
1747 ubifs_debugging_exit(c);
1748#ifdef __UBOOT__
1749 /* Finally free U-Boot's global copy of superblock */
1750 if (ubifs_sb != NULL) {
1751 free(ubifs_sb->s_fs_info);
1752 free(ubifs_sb);
1753 }
1754#endif
1755}
1756
1757#ifndef __UBOOT__
1758/**
1759 * ubifs_remount_rw - re-mount in read-write mode.
1760 * @c: UBIFS file-system description object
1761 *
1762 * UBIFS avoids allocating many unnecessary resources when mounted in read-only
1763 * mode. This function allocates the needed resources and re-mounts UBIFS in
1764 * read-write mode.
1765 */
1766static int ubifs_remount_rw(struct ubifs_info *c)
1767{
1768 int err, lnum;
1769
1770 if (c->rw_incompat) {
1771 ubifs_err("the file-system is not R/W-compatible");
1772 ubifs_msg("on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d",
1773 c->fmt_version, c->ro_compat_version,
1774 UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
1775 return -EROFS;
1776 }
1777
1778 mutex_lock(&c->umount_mutex);
1779 dbg_save_space_info(c);
1780 c->remounting_rw = 1;
1781 c->ro_mount = 0;
1782
1783 if (c->space_fixup) {
1784 err = ubifs_fixup_free_space(c);
1785 if (err)
Heiko Schocher081fe9e2014-07-15 16:08:43 +02001786 goto out;
Heiko Schocherf5895d12014-06-24 10:10:04 +02001787 }
1788
1789 err = check_free_space(c);
1790 if (err)
1791 goto out;
1792
1793 if (c->old_leb_cnt != c->leb_cnt) {
1794 struct ubifs_sb_node *sup;
1795
1796 sup = ubifs_read_sb_node(c);
1797 if (IS_ERR(sup)) {
1798 err = PTR_ERR(sup);
1799 goto out;
1800 }
1801 sup->leb_cnt = cpu_to_le32(c->leb_cnt);
1802 err = ubifs_write_sb_node(c, sup);
1803 kfree(sup);
1804 if (err)
1805 goto out;
1806 }
1807
1808 if (c->need_recovery) {
1809 ubifs_msg("completing deferred recovery");
1810 err = ubifs_write_rcvrd_mst_node(c);
1811 if (err)
1812 goto out;
1813 err = ubifs_recover_size(c);
1814 if (err)
1815 goto out;
1816 err = ubifs_clean_lebs(c, c->sbuf);
1817 if (err)
1818 goto out;
1819 err = ubifs_recover_inl_heads(c, c->sbuf);
1820 if (err)
1821 goto out;
1822 } else {
1823 /* A readonly mount is not allowed to have orphans */
1824 ubifs_assert(c->tot_orphans == 0);
1825 err = ubifs_clear_orphans(c);
1826 if (err)
1827 goto out;
1828 }
1829
1830 if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) {
1831 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
1832 err = ubifs_write_master(c);
1833 if (err)
1834 goto out;
1835 }
1836
1837 c->ileb_buf = vmalloc(c->leb_size);
1838 if (!c->ileb_buf) {
1839 err = -ENOMEM;
1840 goto out;
1841 }
1842
1843 c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ, GFP_KERNEL);
1844 if (!c->write_reserve_buf) {
1845 err = -ENOMEM;
1846 goto out;
1847 }
1848
1849 err = ubifs_lpt_init(c, 0, 1);
1850 if (err)
1851 goto out;
1852
1853 /* Create background thread */
1854 c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name);
1855 if (IS_ERR(c->bgt)) {
1856 err = PTR_ERR(c->bgt);
1857 c->bgt = NULL;
1858 ubifs_err("cannot spawn \"%s\", error %d",
1859 c->bgt_name, err);
1860 goto out;
1861 }
1862 wake_up_process(c->bgt);
1863
1864 c->orph_buf = vmalloc(c->leb_size);
1865 if (!c->orph_buf) {
1866 err = -ENOMEM;
1867 goto out;
1868 }
1869
1870 /* Check for enough log space */
1871 lnum = c->lhead_lnum + 1;
1872 if (lnum >= UBIFS_LOG_LNUM + c->log_lebs)
1873 lnum = UBIFS_LOG_LNUM;
1874 if (lnum == c->ltail_lnum) {
1875 err = ubifs_consolidate_log(c);
1876 if (err)
1877 goto out;
1878 }
Stefan Roese2fc10f62009-03-19 15:35:05 +01001879
Heiko Schocherf5895d12014-06-24 10:10:04 +02001880 if (c->need_recovery)
1881 err = ubifs_rcvry_gc_commit(c);
1882 else
1883 err = ubifs_leb_unmap(c, c->gc_lnum);
1884 if (err)
1885 goto out;
Stefan Roese2fc10f62009-03-19 15:35:05 +01001886
Heiko Schocherf5895d12014-06-24 10:10:04 +02001887 dbg_gen("re-mounted read-write");
1888 c->remounting_rw = 0;
1889
1890 if (c->need_recovery) {
1891 c->need_recovery = 0;
1892 ubifs_msg("deferred recovery completed");
1893 } else {
1894 /*
1895 * Do not run the debugging space check if we were doing
1896 * recovery, because when we saved the information we had the
1897 * file-system in a state where the TNC and lprops have been
1898 * modified in memory, but all the I/O operations (including a
1899 * commit) were deferred. So the file-system was in
1900 * "non-committed" state. Now the file-system is in committed
1901 * state, and of course the amount of free space will change
1902 * because, for example, the old index size was imprecise.
1903 */
1904 err = dbg_check_space_info(c);
1905 }
1906
1907 mutex_unlock(&c->umount_mutex);
1908 return err;
1909
1910out:
1911 c->ro_mount = 1;
1912 vfree(c->orph_buf);
1913 c->orph_buf = NULL;
1914 if (c->bgt) {
Stefan Roese2fc10f62009-03-19 15:35:05 +01001915 kthread_stop(c->bgt);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001916 c->bgt = NULL;
1917 }
1918 free_wbufs(c);
1919 kfree(c->write_reserve_buf);
1920 c->write_reserve_buf = NULL;
Stefan Roese2fc10f62009-03-19 15:35:05 +01001921 vfree(c->ileb_buf);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001922 c->ileb_buf = NULL;
1923 ubifs_lpt_free(c, 1);
1924 c->remounting_rw = 0;
1925 mutex_unlock(&c->umount_mutex);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001926 return err;
1927}
1928
1929/**
Heiko Schocherf5895d12014-06-24 10:10:04 +02001930 * ubifs_remount_ro - re-mount in read-only mode.
Stefan Roese2fc10f62009-03-19 15:35:05 +01001931 * @c: UBIFS file-system description object
1932 *
Heiko Schocherf5895d12014-06-24 10:10:04 +02001933 * We assume VFS has stopped writing. Possibly the background thread could be
1934 * running a commit; however, kthread_stop will wait in that case.
Stefan Roese2fc10f62009-03-19 15:35:05 +01001935 */
Heiko Schocherf5895d12014-06-24 10:10:04 +02001936static void ubifs_remount_ro(struct ubifs_info *c)
Stefan Roese2fc10f62009-03-19 15:35:05 +01001937{
Heiko Schocherf5895d12014-06-24 10:10:04 +02001938 int i, err;
Stefan Roese2fc10f62009-03-19 15:35:05 +01001939
Heiko Schocherf5895d12014-06-24 10:10:04 +02001940 ubifs_assert(!c->need_recovery);
1941 ubifs_assert(!c->ro_mount);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001942
Heiko Schocherf5895d12014-06-24 10:10:04 +02001943 mutex_lock(&c->umount_mutex);
1944 if (c->bgt) {
Stefan Roese2fc10f62009-03-19 15:35:05 +01001945 kthread_stop(c->bgt);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001946 c->bgt = NULL;
1947 }
Stefan Roese2fc10f62009-03-19 15:35:05 +01001948
Heiko Schocherf5895d12014-06-24 10:10:04 +02001949 dbg_save_space_info(c);
Stefan Roese2fc10f62009-03-19 15:35:05 +01001950
Heiko Schocherf5895d12014-06-24 10:10:04 +02001951 for (i = 0; i < c->jhead_cnt; i++)
1952 ubifs_wbuf_sync(&c->jheads[i].wbuf);
1953
1954 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
1955 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
1956 c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
1957 err = ubifs_write_master(c);
1958 if (err)
1959 ubifs_ro_mode(c, err);
1960
1961 vfree(c->orph_buf);
1962 c->orph_buf = NULL;
1963 kfree(c->write_reserve_buf);
1964 c->write_reserve_buf = NULL;
Stefan Roese2fc10f62009-03-19 15:35:05 +01001965 vfree(c->ileb_buf);
Heiko Schocherf5895d12014-06-24 10:10:04 +02001966 c->ileb_buf = NULL;
1967 ubifs_lpt_free(c, 1);
1968 c->ro_mount = 1;
1969 err = dbg_check_space_info(c);
1970 if (err)
1971 ubifs_ro_mode(c, err);
1972 mutex_unlock(&c->umount_mutex);
1973}
Stefan Roese2fc10f62009-03-19 15:35:05 +01001974
Heiko Schocherf5895d12014-06-24 10:10:04 +02001975static void ubifs_put_super(struct super_block *sb)
1976{
1977 int i;
1978 struct ubifs_info *c = sb->s_fs_info;
1979
1980 ubifs_msg("un-mount UBI device %d, volume %d", c->vi.ubi_num,
1981 c->vi.vol_id);
1982
1983 /*
1984 * The following asserts are only valid if there has not been a failure
1985 * of the media. For example, there will be dirty inodes if we failed
1986 * to write them back because of I/O errors.
1987 */
1988 if (!c->ro_error) {
1989 ubifs_assert(c->bi.idx_growth == 0);
1990 ubifs_assert(c->bi.dd_growth == 0);
1991 ubifs_assert(c->bi.data_growth == 0);
1992 }
1993
1994 /*
1995 * The 'c->umount_lock' prevents races between UBIFS memory shrinker
1996 * and file system un-mount. Namely, it prevents the shrinker from
1997 * picking this superblock for shrinking - it will be just skipped if
1998 * the mutex is locked.
1999 */
2000 mutex_lock(&c->umount_mutex);
2001 if (!c->ro_mount) {
2002 /*
2003 * First of all kill the background thread to make sure it does
2004 * not interfere with un-mounting and freeing resources.
2005 */
2006 if (c->bgt) {
2007 kthread_stop(c->bgt);
2008 c->bgt = NULL;
2009 }
2010
2011 /*
2012 * On fatal errors c->ro_error is set to 1, in which case we do
2013 * not write the master node.
2014 */
2015 if (!c->ro_error) {
2016 int err;
2017
2018 /* Synchronize write-buffers */
2019 for (i = 0; i < c->jhead_cnt; i++)
2020 ubifs_wbuf_sync(&c->jheads[i].wbuf);
2021
2022 /*
2023 * We are being cleanly unmounted which means the
2024 * orphans were killed - indicate this in the master
2025 * node. Also save the reserved GC LEB number.
2026 */
2027 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
2028 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
2029 c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
2030 err = ubifs_write_master(c);
2031 if (err)
2032 /*
2033 * Recovery will attempt to fix the master area
2034 * next mount, so we just print a message and
2035 * continue to unmount normally.
2036 */
2037 ubifs_err("failed to write master node, error %d",
2038 err);
2039 } else {
2040#ifndef __UBOOT__
2041 for (i = 0; i < c->jhead_cnt; i++)
2042 /* Make sure write-buffer timers are canceled */
2043 hrtimer_cancel(&c->jheads[i].wbuf.timer);
2044#endif
2045 }
2046 }
2047
2048 ubifs_umount(c);
2049#ifndef __UBOOT__
2050 bdi_destroy(&c->bdi);
2051#endif
2052 ubi_close_volume(c->ubi);
2053 mutex_unlock(&c->umount_mutex);
2054}
2055#endif
2056
2057#ifndef __UBOOT__
2058static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
2059{
2060 int err;
2061 struct ubifs_info *c = sb->s_fs_info;
2062
Heiko Schocher081fe9e2014-07-15 16:08:43 +02002063 sync_filesystem(sb);
Heiko Schocherf5895d12014-06-24 10:10:04 +02002064 dbg_gen("old flags %#lx, new flags %#x", sb->s_flags, *flags);
2065
2066 err = ubifs_parse_options(c, data, 1);
2067 if (err) {
2068 ubifs_err("invalid or unknown remount parameter");
2069 return err;
2070 }
2071
2072 if (c->ro_mount && !(*flags & MS_RDONLY)) {
2073 if (c->ro_error) {
2074 ubifs_msg("cannot re-mount R/W due to prior errors");
2075 return -EROFS;
2076 }
2077 if (c->ro_media) {
2078 ubifs_msg("cannot re-mount R/W - UBI volume is R/O");
2079 return -EROFS;
2080 }
2081 err = ubifs_remount_rw(c);
2082 if (err)
2083 return err;
2084 } else if (!c->ro_mount && (*flags & MS_RDONLY)) {
2085 if (c->ro_error) {
2086 ubifs_msg("cannot re-mount R/O due to prior errors");
2087 return -EROFS;
2088 }
2089 ubifs_remount_ro(c);
Lars Poeschel384b1fb2011-10-12 11:31:19 +02002090 }
Heiko Schocherf5895d12014-06-24 10:10:04 +02002091
2092 if (c->bulk_read == 1)
2093 bu_init(c);
2094 else {
2095 dbg_gen("disable bulk-read");
2096 kfree(c->bu.buf);
2097 c->bu.buf = NULL;
2098 }
2099
2100 ubifs_assert(c->lst.taken_empty_lebs > 0);
2101 return 0;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002102}
Heiko Schocherf5895d12014-06-24 10:10:04 +02002103#endif
2104
2105const struct super_operations ubifs_super_operations = {
2106 .alloc_inode = ubifs_alloc_inode,
2107#ifndef __UBOOT__
2108 .destroy_inode = ubifs_destroy_inode,
2109 .put_super = ubifs_put_super,
2110 .write_inode = ubifs_write_inode,
2111 .evict_inode = ubifs_evict_inode,
2112 .statfs = ubifs_statfs,
2113#endif
2114 .dirty_inode = ubifs_dirty_inode,
2115#ifndef __UBOOT__
2116 .remount_fs = ubifs_remount_fs,
2117 .show_options = ubifs_show_options,
2118 .sync_fs = ubifs_sync_fs,
2119#endif
2120};
Stefan Roese2fc10f62009-03-19 15:35:05 +01002121
2122/**
2123 * open_ubi - parse UBI device name string and open the UBI device.
2124 * @name: UBI volume name
2125 * @mode: UBI volume open mode
2126 *
Heiko Schocherf5895d12014-06-24 10:10:04 +02002127 * The primary method of mounting UBIFS is by specifying the UBI volume
2128 * character device node path. However, UBIFS may also be mounted without any
2129 * character device node using one of the following methods:
2130 *
2131 * o ubiX_Y - mount UBI device number X, volume Y;
2132 * o ubiY - mount UBI device number 0, volume Y;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002133 * o ubiX:NAME - mount UBI device X, volume with name NAME;
2134 * o ubi:NAME - mount UBI device 0, volume with name NAME.
2135 *
2136 * Alternative '!' separator may be used instead of ':' (because some shells
2137 * like busybox may interpret ':' as an NFS host name separator). This function
Heiko Schocherf5895d12014-06-24 10:10:04 +02002138 * returns a UBI volume description object in case of success and a negative
2139 * error code in case of failure.
Stefan Roese2fc10f62009-03-19 15:35:05 +01002140 */
2141static struct ubi_volume_desc *open_ubi(const char *name, int mode)
2142{
Heiko Schocherf5895d12014-06-24 10:10:04 +02002143#ifndef __UBOOT__
2144 struct ubi_volume_desc *ubi;
2145#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002146 int dev, vol;
2147 char *endptr;
2148
Heiko Schocherf5895d12014-06-24 10:10:04 +02002149#ifndef __UBOOT__
2150 /* First, try to open using the device node path method */
2151 ubi = ubi_open_volume_path(name, mode);
2152 if (!IS_ERR(ubi))
2153 return ubi;
2154#endif
2155
2156 /* Try the "nodev" method */
Stefan Roese2fc10f62009-03-19 15:35:05 +01002157 if (name[0] != 'u' || name[1] != 'b' || name[2] != 'i')
2158 return ERR_PTR(-EINVAL);
2159
2160 /* ubi:NAME method */
2161 if ((name[3] == ':' || name[3] == '!') && name[4] != '\0')
2162 return ubi_open_volume_nm(0, name + 4, mode);
2163
2164 if (!isdigit(name[3]))
2165 return ERR_PTR(-EINVAL);
2166
2167 dev = simple_strtoul(name + 3, &endptr, 0);
2168
2169 /* ubiY method */
2170 if (*endptr == '\0')
2171 return ubi_open_volume(0, dev, mode);
2172
2173 /* ubiX_Y method */
2174 if (*endptr == '_' && isdigit(endptr[1])) {
2175 vol = simple_strtoul(endptr + 1, &endptr, 0);
2176 if (*endptr != '\0')
2177 return ERR_PTR(-EINVAL);
2178 return ubi_open_volume(dev, vol, mode);
2179 }
2180
2181 /* ubiX:NAME method */
2182 if ((*endptr == ':' || *endptr == '!') && endptr[1] != '\0')
2183 return ubi_open_volume_nm(dev, ++endptr, mode);
2184
2185 return ERR_PTR(-EINVAL);
2186}
2187
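/*
 * Illustrative sketch only (kept out of the build): example name strings
 * accepted by open_ubi() above. The volume name "rootfs" is a made-up
 * placeholder, not something defined in this file.
 */
#if 0
static void open_ubi_name_example(void)
{
	struct ubi_volume_desc *ubi;

	/*
	 * Other accepted spellings: "ubi2" (device 0, volume 2), "ubi0_2"
	 * (device 0, volume 2) and "ubi0!rootfs" (same volume as below,
	 * with the '!' separator).
	 */
	ubi = open_ubi("ubi0:rootfs", UBI_READONLY);
	if (IS_ERR(ubi))
		return;
	ubi_close_volume(ubi);
}
#endif
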
Heiko Schocherf5895d12014-06-24 10:10:04 +02002188static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
Stefan Roese2fc10f62009-03-19 15:35:05 +01002189{
Stefan Roese2fc10f62009-03-19 15:35:05 +01002190 struct ubifs_info *c;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002191
2192 c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL);
Heiko Schocherf5895d12014-06-24 10:10:04 +02002193 if (c) {
2194 spin_lock_init(&c->cnt_lock);
2195 spin_lock_init(&c->cs_lock);
2196 spin_lock_init(&c->buds_lock);
2197 spin_lock_init(&c->space_lock);
2198 spin_lock_init(&c->orphan_lock);
2199 init_rwsem(&c->commit_sem);
2200 mutex_init(&c->lp_mutex);
2201 mutex_init(&c->tnc_mutex);
2202 mutex_init(&c->log_mutex);
2203 mutex_init(&c->mst_mutex);
2204 mutex_init(&c->umount_mutex);
2205 mutex_init(&c->bu_mutex);
2206 mutex_init(&c->write_reserve_mutex);
2207 init_waitqueue_head(&c->cmt_wq);
2208 c->buds = RB_ROOT;
2209 c->old_idx = RB_ROOT;
2210 c->size_tree = RB_ROOT;
2211 c->orph_tree = RB_ROOT;
2212 INIT_LIST_HEAD(&c->infos_list);
2213 INIT_LIST_HEAD(&c->idx_gc);
2214 INIT_LIST_HEAD(&c->replay_list);
2215 INIT_LIST_HEAD(&c->replay_buds);
2216 INIT_LIST_HEAD(&c->uncat_list);
2217 INIT_LIST_HEAD(&c->empty_list);
2218 INIT_LIST_HEAD(&c->freeable_list);
2219 INIT_LIST_HEAD(&c->frdi_idx_list);
2220 INIT_LIST_HEAD(&c->unclean_leb_list);
2221 INIT_LIST_HEAD(&c->old_buds);
2222 INIT_LIST_HEAD(&c->orph_list);
2223 INIT_LIST_HEAD(&c->orph_new);
2224 c->no_chk_data_crc = 1;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002225
Heiko Schocherf5895d12014-06-24 10:10:04 +02002226 c->highest_inum = UBIFS_FIRST_INO;
2227 c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002228
Heiko Schocherf5895d12014-06-24 10:10:04 +02002229 ubi_get_volume_info(ubi, &c->vi);
2230 ubi_get_device_info(c->vi.ubi_num, &c->di);
2231 }
2232 return c;
2233}
Stefan Roese2fc10f62009-03-19 15:35:05 +01002234
Heiko Schocherf5895d12014-06-24 10:10:04 +02002235static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
2236{
2237 struct ubifs_info *c = sb->s_fs_info;
2238 struct inode *root;
2239 int err;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002240
Heiko Schocherf5895d12014-06-24 10:10:04 +02002241 c->vfs_sb = sb;
Heiko Schocherb24c4272014-07-15 16:08:42 +02002242#ifndef __UBOOT__
Stefan Roese2fc10f62009-03-19 15:35:05 +01002243 /* Re-open the UBI device in read-write mode */
Heiko Schocherf5895d12014-06-24 10:10:04 +02002244 c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE);
Heiko Schocherb24c4272014-07-15 16:08:42 +02002245#else
2246 /* U-Boot read-only mode */
2247 c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READONLY);
2248#endif
2249
Stefan Roese2fc10f62009-03-19 15:35:05 +01002250 if (IS_ERR(c->ubi)) {
2251 err = PTR_ERR(c->ubi);
Heiko Schocherf5895d12014-06-24 10:10:04 +02002252 goto out;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002253 }
2254
Heiko Schocherf5895d12014-06-24 10:10:04 +02002255#ifndef __UBOOT__
2256 /*
2257 * UBIFS provides 'backing_dev_info' in order to disable read-ahead. For
2258 * UBIFS, I/O is not deferred, it is done immediately in readpage,
2259 * which means the user would have to wait not just for their own I/O
2260 * but the read-ahead I/O as well i.e. completely pointless.
2261 *
2262 * Read-ahead will be disabled because @c->bdi.ra_pages is 0.
2263 */
2264 c->bdi.name = "ubifs",
2265 c->bdi.capabilities = BDI_CAP_MAP_COPY;
2266 err = bdi_init(&c->bdi);
2267 if (err)
2268 goto out_close;
2269 err = bdi_register(&c->bdi, NULL, "ubifs_%d_%d",
2270 c->vi.ubi_num, c->vi.vol_id);
2271 if (err)
2272 goto out_bdi;
2273
2274 err = ubifs_parse_options(c, data, 0);
2275 if (err)
2276 goto out_bdi;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002277
Heiko Schocherf5895d12014-06-24 10:10:04 +02002278 sb->s_bdi = &c->bdi;
2279#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002280 sb->s_fs_info = c;
2281 sb->s_magic = UBIFS_SUPER_MAGIC;
2282 sb->s_blocksize = UBIFS_BLOCK_SIZE;
2283 sb->s_blocksize_bits = UBIFS_BLOCK_SHIFT;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002284 sb->s_maxbytes = c->max_inode_sz = key_max_inode_size(c);
2285 if (c->max_inode_sz > MAX_LFS_FILESIZE)
2286 sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
Heiko Schocherf5895d12014-06-24 10:10:04 +02002287 sb->s_op = &ubifs_super_operations;
Artem Bityutskiy619697a2009-03-27 10:21:14 +01002288
Stefan Roese2fc10f62009-03-19 15:35:05 +01002289 mutex_lock(&c->umount_mutex);
2290 err = mount_ubifs(c);
2291 if (err) {
2292 ubifs_assert(err < 0);
2293 goto out_unlock;
2294 }
2295
2296 /* Read the root inode */
2297 root = ubifs_iget(sb, UBIFS_ROOT_INO);
2298 if (IS_ERR(root)) {
2299 err = PTR_ERR(root);
2300 goto out_umount;
2301 }
2302
Heiko Schocherf5895d12014-06-24 10:10:04 +02002303#ifndef __UBOOT__
2304 sb->s_root = d_make_root(root);
2305 if (!sb->s_root) {
2306 err = -ENOMEM;
2307 goto out_umount;
2308 }
2309#else
Stefan Roese2fc10f62009-03-19 15:35:05 +01002310 sb->s_root = NULL;
Heiko Schocherf5895d12014-06-24 10:10:04 +02002311#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002312
2313 mutex_unlock(&c->umount_mutex);
2314 return 0;
2315
2316out_umount:
2317 ubifs_umount(c);
2318out_unlock:
2319 mutex_unlock(&c->umount_mutex);
Heiko Schocherf5895d12014-06-24 10:10:04 +02002320#ifndef __UBOOT__
2321out_bdi:
2322 bdi_destroy(&c->bdi);
2323out_close:
2324#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002325 ubi_close_volume(c->ubi);
Heiko Schocherf5895d12014-06-24 10:10:04 +02002326out:
Stefan Roese2fc10f62009-03-19 15:35:05 +01002327 return err;
2328}
2329
2330static int sb_test(struct super_block *sb, void *data)
2331{
Heiko Schocherf5895d12014-06-24 10:10:04 +02002332 struct ubifs_info *c1 = data;
2333 struct ubifs_info *c = sb->s_fs_info;
2334
2335 return c->vi.cdev == c1->vi.cdev;
2336}
2337
2338static int sb_set(struct super_block *sb, void *data)
2339{
2340 sb->s_fs_info = data;
2341 return set_anon_super(sb, NULL);
2342}
2343
2344static struct super_block *alloc_super(struct file_system_type *type, int flags)
2345{
2346 struct super_block *s;
2347 int err;
2348
2349 s = kzalloc(sizeof(struct super_block), GFP_USER);
2350 if (!s) {
2351 err = -ENOMEM;
2352 return ERR_PTR(err);
2353 }
2354
2355 INIT_HLIST_NODE(&s->s_instances);
2356 INIT_LIST_HEAD(&s->s_inodes);
2357 s->s_time_gran = 1000000000;
2358 s->s_flags = flags;
2359
2360 return s;
2361}
2362
2363/**
2364 * sget - find or create a superblock
2365 * @type: filesystem type superblock should belong to
2366 * @test: comparison callback
2367 * @set: setup callback
2368 * @flags: mount flags
2369 * @data: argument to each of them
2370 */
2371struct super_block *sget(struct file_system_type *type,
2372 int (*test)(struct super_block *,void *),
2373 int (*set)(struct super_block *,void *),
2374 int flags,
2375 void *data)
2376{
2377 struct super_block *s = NULL;
2378#ifndef __UBOOT__
2379 struct super_block *old;
2380#endif
2381 int err;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002382
Heiko Schocherf5895d12014-06-24 10:10:04 +02002383#ifndef __UBOOT__
2384retry:
2385 spin_lock(&sb_lock);
2386 if (test) {
2387 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
2388 if (!test(old, data))
2389 continue;
2390 if (!grab_super(old))
2391 goto retry;
2392 if (s) {
2393 up_write(&s->s_umount);
2394 destroy_super(s);
2395 s = NULL;
2396 }
2397 return old;
2398 }
2399 }
2400#endif
2401 if (!s) {
2402 spin_unlock(&sb_lock);
2403 s = alloc_super(type, flags);
2404 if (!s)
2405 return ERR_PTR(-ENOMEM);
2406#ifndef __UBOOT__
2407 goto retry;
2408#endif
2409 }
2410
2411 err = set(s, data);
2412 if (err) {
2413#ifndef __UBOOT__
2414 spin_unlock(&sb_lock);
2415 up_write(&s->s_umount);
2416 destroy_super(s);
2417#endif
2418 return ERR_PTR(err);
2419 }
2420 s->s_type = type;
2421#ifndef __UBOOT__
2422 strlcpy(s->s_id, type->name, sizeof(s->s_id));
2423#else
2424 strncpy(s->s_id, type->name, sizeof(s->s_id));
2425#endif
2426 list_add_tail(&s->s_list, &super_blocks);
2427 hlist_add_head(&s->s_instances, &type->fs_supers);
2428#ifndef __UBOOT__
2429 spin_unlock(&sb_lock);
2430 get_filesystem(type);
2431 register_shrinker(&s->s_shrink);
2432#endif
2433 return s;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002434}
2435
Heiko Schocherf5895d12014-06-24 10:10:04 +02002436EXPORT_SYMBOL(sget);
2437
2438
2439static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
2440 const char *name, void *data)
Stefan Roese2fc10f62009-03-19 15:35:05 +01002441{
2442 struct ubi_volume_desc *ubi;
Heiko Schocherf5895d12014-06-24 10:10:04 +02002443 struct ubifs_info *c;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002444 struct super_block *sb;
2445 int err;
2446
2447 dbg_gen("name %s, flags %#x", name, flags);
2448
2449 /*
2450 * Get UBI device number and volume ID. Mount it read-only so far
2451 * because this might be a new mount point, and UBI allows only one
2452 * read-write user at a time.
2453 */
2454 ubi = open_ubi(name, UBI_READONLY);
2455 if (IS_ERR(ubi)) {
2456 ubifs_err("cannot open \"%s\", error %d",
2457 name, (int)PTR_ERR(ubi));
Heiko Schocherf5895d12014-06-24 10:10:04 +02002458 return ERR_CAST(ubi);
2459 }
2460
2461 c = alloc_ubifs_info(ubi);
2462 if (!c) {
2463 err = -ENOMEM;
2464 goto out_close;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002465 }
Stefan Roese2fc10f62009-03-19 15:35:05 +01002466
Heiko Schocherf5895d12014-06-24 10:10:04 +02002467 dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
Stefan Roese2fc10f62009-03-19 15:35:05 +01002468
Heiko Schocherf5895d12014-06-24 10:10:04 +02002469 sb = sget(fs_type, sb_test, sb_set, flags, c);
Stefan Roese2fc10f62009-03-19 15:35:05 +01002470 if (IS_ERR(sb)) {
2471 err = PTR_ERR(sb);
Heiko Schocherf5895d12014-06-24 10:10:04 +02002472 kfree(c);
Stefan Roese2fc10f62009-03-19 15:35:05 +01002473 goto out_close;
2474 }
2475
2476 if (sb->s_root) {
Heiko Schocherf5895d12014-06-24 10:10:04 +02002477 struct ubifs_info *c1 = sb->s_fs_info;
2478 kfree(c);
Stefan Roese2fc10f62009-03-19 15:35:05 +01002479 /* A new mount point for already mounted UBIFS */
2480 dbg_gen("this ubi volume is already mounted");
Heiko Schocherf5895d12014-06-24 10:10:04 +02002481 if (!!(flags & MS_RDONLY) != c1->ro_mount) {
Stefan Roese2fc10f62009-03-19 15:35:05 +01002482 err = -EBUSY;
2483 goto out_deact;
2484 }
2485 } else {
Stefan Roese2fc10f62009-03-19 15:35:05 +01002486 err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
2487 if (err)
2488 goto out_deact;
2489 /* We do not support atime */
2490 sb->s_flags |= MS_ACTIVE | MS_NOATIME;
2491 }
2492
2493 /* 'fill_super()' opens ubi again so we must close it here */
2494 ubi_close_volume(ubi);
2495
Heiko Schocherf5895d12014-06-24 10:10:04 +02002496#ifdef __UBOOT__
Stefan Roese2fc10f62009-03-19 15:35:05 +01002497 ubifs_sb = sb;
2498 return 0;
Heiko Schocherf5895d12014-06-24 10:10:04 +02002499#else
2500 return dget(sb->s_root);
2501#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002502
2503out_deact:
Heiko Schocherf5895d12014-06-24 10:10:04 +02002504#ifndef __UBOOT__
2505 deactivate_locked_super(sb);
2506#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002507out_close:
2508 ubi_close_volume(ubi);
Heiko Schocherf5895d12014-06-24 10:10:04 +02002509 return ERR_PTR(err);
2510}
2511
2512static void kill_ubifs_super(struct super_block *s)
2513{
2514 struct ubifs_info *c = s->s_fs_info;
2515#ifndef __UBOOT__
2516 kill_anon_super(s);
2517#endif
2518 kfree(c);
Stefan Roese2fc10f62009-03-19 15:35:05 +01002519}
2520
Heiko Schocherf5895d12014-06-24 10:10:04 +02002521static struct file_system_type ubifs_fs_type = {
2522 .name = "ubifs",
2523 .owner = THIS_MODULE,
2524 .mount = ubifs_mount,
2525 .kill_sb = kill_ubifs_super,
2526};
2527#ifndef __UBOOT__
2528MODULE_ALIAS_FS("ubifs");
2529
2530/*
2531 * Inode slab cache constructor.
2532 */
2533static void inode_slab_ctor(void *obj)
2534{
2535 struct ubifs_inode *ui = obj;
2536 inode_init_once(&ui->vfs_inode);
2537}
2538
2539static int __init ubifs_init(void)
2540#else
2541int ubifs_init(void)
2542#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002543{
2544 int err;
2545
2546 BUILD_BUG_ON(sizeof(struct ubifs_ch) != 24);
2547
2548 /* Make sure node sizes are 8-byte aligned */
2549 BUILD_BUG_ON(UBIFS_CH_SZ & 7);
2550 BUILD_BUG_ON(UBIFS_INO_NODE_SZ & 7);
2551 BUILD_BUG_ON(UBIFS_DENT_NODE_SZ & 7);
2552 BUILD_BUG_ON(UBIFS_XENT_NODE_SZ & 7);
2553 BUILD_BUG_ON(UBIFS_DATA_NODE_SZ & 7);
2554 BUILD_BUG_ON(UBIFS_TRUN_NODE_SZ & 7);
2555 BUILD_BUG_ON(UBIFS_SB_NODE_SZ & 7);
2556 BUILD_BUG_ON(UBIFS_MST_NODE_SZ & 7);
2557 BUILD_BUG_ON(UBIFS_REF_NODE_SZ & 7);
2558 BUILD_BUG_ON(UBIFS_CS_NODE_SZ & 7);
2559 BUILD_BUG_ON(UBIFS_ORPH_NODE_SZ & 7);
2560
2561 BUILD_BUG_ON(UBIFS_MAX_DENT_NODE_SZ & 7);
2562 BUILD_BUG_ON(UBIFS_MAX_XENT_NODE_SZ & 7);
2563 BUILD_BUG_ON(UBIFS_MAX_DATA_NODE_SZ & 7);
2564 BUILD_BUG_ON(UBIFS_MAX_INO_NODE_SZ & 7);
2565 BUILD_BUG_ON(UBIFS_MAX_NODE_SZ & 7);
2566 BUILD_BUG_ON(MIN_WRITE_SZ & 7);
2567
2568 /* Check min. node size */
2569 BUILD_BUG_ON(UBIFS_INO_NODE_SZ < MIN_WRITE_SZ);
2570 BUILD_BUG_ON(UBIFS_DENT_NODE_SZ < MIN_WRITE_SZ);
2571 BUILD_BUG_ON(UBIFS_XENT_NODE_SZ < MIN_WRITE_SZ);
2572 BUILD_BUG_ON(UBIFS_TRUN_NODE_SZ < MIN_WRITE_SZ);
2573
2574 BUILD_BUG_ON(UBIFS_MAX_DENT_NODE_SZ > UBIFS_MAX_NODE_SZ);
2575 BUILD_BUG_ON(UBIFS_MAX_XENT_NODE_SZ > UBIFS_MAX_NODE_SZ);
2576 BUILD_BUG_ON(UBIFS_MAX_DATA_NODE_SZ > UBIFS_MAX_NODE_SZ);
2577 BUILD_BUG_ON(UBIFS_MAX_INO_NODE_SZ > UBIFS_MAX_NODE_SZ);
2578
2579 /* Defined node sizes */
2580 BUILD_BUG_ON(UBIFS_SB_NODE_SZ != 4096);
2581 BUILD_BUG_ON(UBIFS_MST_NODE_SZ != 512);
2582 BUILD_BUG_ON(UBIFS_INO_NODE_SZ != 160);
2583 BUILD_BUG_ON(UBIFS_REF_NODE_SZ != 64);
2584
2585 /*
2586 * We use 2 bit wide bit-fields to store compression type, which should
2587 * be amended if more compressors are added. The bit-fields are:
2588 * @compr_type in 'struct ubifs_inode', @default_compr in
2589 * 'struct ubifs_info' and @compr_type in 'struct ubifs_mount_opts'.
2590 */
2591 BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
2592
2593 /*
2594 * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
2595 * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
2596 */
2597 if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
Heiko Schocherf5895d12014-06-24 10:10:04 +02002598 ubifs_err("VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes",
Stefan Roese2fc10f62009-03-19 15:35:05 +01002599 (unsigned int)PAGE_CACHE_SIZE);
2600 return -EINVAL;
2601 }
2602
Heiko Schocherf5895d12014-06-24 10:10:04 +02002603#ifndef __UBOOT__
2604 ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
2605 sizeof(struct ubifs_inode), 0,
2606 SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
2607 &inode_slab_ctor);
2608 if (!ubifs_inode_slab)
2609 return -ENOMEM;
2610
2611 register_shrinker(&ubifs_shrinker_info);
2612#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002613
2614 err = ubifs_compressors_init();
2615 if (err)
2616 goto out_shrinker;
2617
Heiko Schocherf5895d12014-06-24 10:10:04 +02002618#ifndef __UBOOT__
2619 err = dbg_debugfs_init();
2620 if (err)
2621 goto out_compr;
2622
2623 err = register_filesystem(&ubifs_fs_type);
2624 if (err) {
2625 ubifs_err("cannot register file system, error %d", err);
2626 goto out_dbg;
2627 }
2628#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002629 return 0;
2630
Heiko Schocherf5895d12014-06-24 10:10:04 +02002631#ifndef __UBOOT__
2632out_dbg:
2633 dbg_debugfs_exit();
2634out_compr:
2635 ubifs_compressors_exit();
2636#endif
Stefan Roese2fc10f62009-03-19 15:35:05 +01002637out_shrinker:
Heiko Schocherf5895d12014-06-24 10:10:04 +02002638#ifndef __UBOOT__
2639 unregister_shrinker(&ubifs_shrinker_info);
2640#endif
2641 kmem_cache_destroy(ubifs_inode_slab);
Stefan Roese2fc10f62009-03-19 15:35:05 +01002642 return err;
2643}
Heiko Schocherf5895d12014-06-24 10:10:04 +02002644/* late_initcall to let compressors initialize first */
2645late_initcall(ubifs_init);
Stefan Roese2fc10f62009-03-19 15:35:05 +01002646
Heiko Schocherf5895d12014-06-24 10:10:04 +02002647#ifndef __UBOOT__
2648static void __exit ubifs_exit(void)
2649{
2650 ubifs_assert(list_empty(&ubifs_infos));
2651 ubifs_assert(atomic_long_read(&ubifs_clean_zn_cnt) == 0);
Stefan Roese2fc10f62009-03-19 15:35:05 +01002652
Heiko Schocherf5895d12014-06-24 10:10:04 +02002653 dbg_debugfs_exit();
2654 ubifs_compressors_exit();
2655 unregister_shrinker(&ubifs_shrinker_info);
Stefan Roese2fc10f62009-03-19 15:35:05 +01002656
Heiko Schocherf5895d12014-06-24 10:10:04 +02002657 /*
2658 * Make sure all delayed rcu free inodes are flushed before we
2659 * destroy cache.
2660 */
2661 rcu_barrier();
2662 kmem_cache_destroy(ubifs_inode_slab);
2663 unregister_filesystem(&ubifs_fs_type);
2664}
2665module_exit(ubifs_exit);
2666
2667MODULE_LICENSE("GPL");
2668MODULE_VERSION(__stringify(UBIFS_VERSION));
2669MODULE_AUTHOR("Artem Bityutskiy, Adrian Hunter");
2670MODULE_DESCRIPTION("UBIFS - UBI File System");
2671#else
2672int uboot_ubifs_mount(char *vol_name)
Stefan Roese2fc10f62009-03-19 15:35:05 +01002673{
Heiko Schocherf5895d12014-06-24 10:10:04 +02002674 struct dentry *ret;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002675 int flags;
Stefan Roese2fc10f62009-03-19 15:35:05 +01002676
2677 /*
2678 * First unmount if already mounted
2679 */
2680 if (ubifs_sb)
2681 ubifs_umount(ubifs_sb->s_fs_info);
2682
Stefan Roese2fc10f62009-03-19 15:35:05 +01002683 /*
2684 * Mount in read-only mode
2685 */
2686 flags = MS_RDONLY;
Heiko Schocherf5895d12014-06-24 10:10:04 +02002687 ret = ubifs_mount(&ubifs_fs_type, flags, vol_name, NULL);
2688 if (IS_ERR(ret)) {
2689 printf("Error reading superblock on volume '%s' " \
2690 "errno=%d!\n", vol_name, (int)PTR_ERR(ret));
Stefan Roese2fc10f62009-03-19 15:35:05 +01002691 return -1;
2692 }
2693
Stefan Roese2fc10f62009-03-19 15:35:05 +01002694 return 0;
2695}
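
/*
 * Illustrative sketch only (kept out of the build): a caller such as
 * U-Boot's "ubifsmount" command handler is assumed to pass a volume name in
 * one of the formats accepted by open_ubi(); "ubi0:rootfs" is a made-up
 * example.
 */
#if 0
static int example_ubifs_mount(void)
{
	if (uboot_ubifs_mount("ubi0:rootfs"))
		return -1;

	/* On success, ubifs_sb holds U-Boot's global copy of the superblock */
	return 0;
}
#endif
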
Heiko Schocherf5895d12014-06-24 10:10:04 +02002696#endif