/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mtd/mtd.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

struct mtd_info *mtd_table[MAX_MTD_DEVICES];

int add_mtd_device(struct mtd_info *mtd)
{
	int i;

	BUG_ON(mtd->writesize == 0);

	for (i = 0; i < MAX_MTD_DEVICES; i++)
		if (!mtd_table[i]) {
			mtd_table[i] = mtd;
			mtd->index = i;
			mtd->usecount = 0;

			/* default value if not set by driver */
			if (mtd->bitflip_threshold == 0)
				mtd->bitflip_threshold = mtd->ecc_strength;

			/* No need to get a refcount on the module containing
			   the notifier, since we hold the mtd_table_mutex */

			/* We _know_ we aren't being removed, because
			   our caller is still holding us here. So none
			   of this try_ nonsense, and no bitching about it
			   either. :) */
			return 0;
		}

	return 1;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success, -ENODEV if the device is not present in the
 * table, or -EBUSY if it is still in use.
 */
int del_mtd_device(struct mtd_info *mtd)
{
	int ret;

	if (mtd_table[mtd->index] != mtd) {
		ret = -ENODEV;
	} else if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s)"
		       " with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		/* No need to get a refcount on the module containing
		 * the notifier, since we hold the mtd_table_mutex */
		mtd_table[mtd->index] = NULL;

		ret = 0;
	}

	return ret;
}

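/*
 * Example (illustrative only, not part of this file): a teardown path
 * might unregister a device it previously registered with
 * add_mtd_device() and report the two failure modes described above.
 * The "mtd" pointer is assumed to be the device that was registered.
 *
 *	int err = del_mtd_device(mtd);
 *
 *	if (err == -EBUSY)
 *		printk(KERN_WARNING "%s still in use, not removed\n",
 *		       mtd->name);
 *	else if (err == -ENODEV)
 *		printk(KERN_WARNING "%s was never registered\n", mtd->name);
 */
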
/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return it if it's still present.
 * Given both, return the num'th driver only if its address matches.
 * Return an ERR_PTR-encoded error code otherwise.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL;
	int i, err = -ENODEV;

	if (num == -1) {
		for (i = 0; i < MAX_MTD_DEVICES; i++)
			if (mtd_table[i] == mtd)
				ret = mtd_table[i];
	} else if (num < MAX_MTD_DEVICES) {
		ret = mtd_table[num];
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret)
		goto out_unlock;

	ret->usecount++;
	return ret;

out_unlock:
	return ERR_PTR(err);
}

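/*
 * Illustrative use (device number 0 is a placeholder): look up a device
 * by index, bail out on the ERR_PTR-encoded error, and balance the use
 * count with put_mtd_device() once the handle is no longer needed.
 *
 *	struct mtd_info *mtd = get_mtd_device(NULL, 0);
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	... use mtd ...
 *	put_mtd_device(mtd);
 */
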
/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 * device name
 * @name: MTD device name to open
 *
 * This function returns the MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int i, err = -ENODEV;
	struct mtd_info *mtd = NULL;

	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) {
			mtd = mtd_table[i];
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	mtd->usecount++;
	return mtd;

out_unlock:
	return ERR_PTR(err);
}

void put_mtd_device(struct mtd_info *mtd)
{
	int c;

	c = --mtd->usecount;
	BUG_ON(c < 0);
}

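/*
 * Illustrative use (the name "nand0" is a placeholder): the by-name
 * lookup follows the same pattern as get_mtd_device(), including the
 * matching put_mtd_device() when the caller is done with the device.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("nand0");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	... use mtd ...
 *	put_mtd_device(mtd);
 */
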
#if defined(CONFIG_CMD_MTDPARTS_SPREAD)
/**
 * mtd_get_len_incl_bad
 *
 * Check if length including bad blocks fits into device.
 *
 * @param mtd an MTD device
 * @param offset offset in flash
 * @param length image length
 * @param len_incl_bad returns the image length including bad blocks
 * @param truncated returns whether the length was truncated because it
 *        ran past the end of the device
 */
void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
			  const uint64_t length, uint64_t *len_incl_bad,
			  int *truncated)
{
	*truncated = 0;
	*len_incl_bad = 0;

	if (!mtd->block_isbad) {
		*len_incl_bad = length;
		return;
	}

	uint64_t len_excl_bad = 0;
	uint64_t block_len;

	while (len_excl_bad < length) {
		if (offset >= mtd->size) {
			*truncated = 1;
			return;
		}

		block_len = mtd->erasesize - (offset & (mtd->erasesize - 1));

		if (!mtd->block_isbad(mtd, offset & ~(mtd->erasesize - 1)))
			len_excl_bad += block_len;

		*len_incl_bad += block_len;
		offset += block_len;
	}
}
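
/*
 * Illustrative caller (offset and image_len are placeholders): work out
 * how much flash the image really occupies once bad blocks are skipped
 * over, and whether it would run off the end of the device.
 *
 *	uint64_t len_incl_bad;
 *	int truncated;
 *
 *	mtd_get_len_incl_bad(mtd, offset, image_len, &len_incl_bad,
 *			     &truncated);
 *	if (truncated)
 *		printf("image does not fit on %s\n", mtd->name);
 */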
#endif /* defined(CONFIG_CMD_MTDPARTS_SPREAD) */

/*
 * Erase is an asynchronous operation. Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}

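/*
 * Illustrative caller (ofs is a placeholder): in U-Boot the drivers
 * behind _erase typically complete before returning, so a caller can
 * erase a single block and then inspect instr.state instead of waiting
 * for a callback.
 *
 *	struct erase_info instr;
 *
 *	memset(&instr, 0, sizeof(instr));
 *	instr.mtd = mtd;
 *	instr.addr = ofs;
 *	instr.len = mtd->erasesize;
 *	if (mtd_erase(mtd, &instr) || instr.state != MTD_ERASE_DONE)
 *		printk(KERN_ERR "erase failed at 0x%llx\n",
 *		       (unsigned long long)instr.fail_addr);
 */
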
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;

	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	/*
	 * In the absence of an error, drivers return a non-negative integer
	 * representing the maximum number of bitflips that were corrected on
	 * any one ecc region (if applicable; zero otherwise).
	 */
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}

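/*
 * Illustrative caller: -EUCLEAN means the read itself succeeded and the
 * data in buf is valid, but the corrected bitflip count reached the
 * threshold, so the block is a candidate for scrubbing. Any other
 * negative code is a real failure. scrub_block() is a made-up helper.
 *
 *	int err = mtd_read(mtd, from, len, &retlen, buf);
 *
 *	if (err == -EUCLEAN)
 *		scrub_block(mtd, from);
 *	else if (err < 0)
 *		return err;
 */
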
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}

/*
 * In blackbox flight recorder like scenarios we want to make successful
 * writes in interrupt context. panic_write() is only intended to be called
 * when it's known the kernel is about to panic and we need the write to
 * succeed. Since the kernel is not going to be running for much longer,
 * this function can break locks and delay to ensure the write succeeds
 * (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}

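/*
 * Illustrative caller (ofs, log_buf and log_len are placeholders): a
 * crash handler that must not sleep could dump its last messages this
 * way; the return value is ignored because at that point nothing more
 * can be done about a failure.
 *
 *	size_t retlen;
 *
 *	(void)mtd_panic_write(mtd, ofs, log_len, &retlen, log_buf);
 */
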
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;
	return mtd->_read_oob(mtd, from, ops);
}

/*
 * Methods to access the protection register area, present in some flash
 * devices. The user data is one-time programmable but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
			   size_t len)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, buf, len);
}

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}

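/*
 * Illustrative caller (the 16-entry table and 32-byte buffer are
 * arbitrary sizes): list the factory OTP regions and read from the start
 * of the first one. Both helpers return -EOPNOTSUPP on devices without a
 * protection register area.
 *
 *	struct otp_info info[16];
 *	u_char serial[32];
 *	size_t retlen;
 *
 *	if (mtd_get_fact_prot_info(mtd, info, sizeof(info)) > 0)
 *		mtd_read_fact_prot_reg(mtd, info[0].start, sizeof(serial),
 *				       &retlen, serial);
 */
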
int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
			   size_t len)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, buf, len);
}

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
}

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}

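/*
 * Illustrative caller (ofs is a placeholder): unlock one erase block
 * before modifying it, then lock it again. On chips without locking
 * support both helpers simply return -EOPNOTSUPP.
 *
 *	if (mtd_unlock(mtd, ofs, mtd->erasesize) == 0) {
 *		... erase and rewrite the block ...
 *		mtd_lock(mtd, ofs, mtd->erasesize);
 *	}
 */
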
371
372int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
373{
374 if (!mtd->_block_isbad)
375 return 0;
376 if (ofs < 0 || ofs > mtd->size)
377 return -EINVAL;
378 return mtd->_block_isbad(mtd, ofs);
379}
380
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}