blob: ada7af4a416fa8b156f2a3ed245c9c0503bf9202 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright 2017 - Free Electrons
4 *
5 * Authors:
6 * Boris Brezillon <boris.brezillon@free-electrons.com>
7 * Peter Pan <peterpandong@micron.com>
8 */
9
10#ifndef __LINUX_MTD_NAND_H
11#define __LINUX_MTD_NAND_H
12
13#include <linux/mtd/mtd.h>
14
15/**
16 * struct nand_memory_organization - Memory organization structure
17 * @bits_per_cell: number of bits per NAND cell
18 * @pagesize: page size
19 * @oobsize: OOB area size
20 * @pages_per_eraseblock: number of pages per eraseblock
21 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
22 * @planes_per_lun: number of planes per LUN
23 * @luns_per_target: number of LUN per target (target is a synonym for die)
24 * @ntargets: total number of targets exposed by the NAND device
25 */
26struct nand_memory_organization {
27 unsigned int bits_per_cell;
28 unsigned int pagesize;
29 unsigned int oobsize;
30 unsigned int pages_per_eraseblock;
31 unsigned int eraseblocks_per_lun;
32 unsigned int planes_per_lun;
33 unsigned int luns_per_target;
34 unsigned int ntargets;
35};
36
37#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \
38 { \
39 .bits_per_cell = (bpc), \
40 .pagesize = (ps), \
41 .oobsize = (os), \
42 .pages_per_eraseblock = (ppe), \
43 .eraseblocks_per_lun = (epl), \
44 .planes_per_lun = (ppl), \
45 .luns_per_target = (lpt), \
46 .ntargets = (nt), \
47 }
48
49/**
50 * struct nand_row_converter - Information needed to convert an absolute offset
51 * into a row address
52 * @lun_addr_shift: position of the LUN identifier in the row address
53 * @eraseblock_addr_shift: position of the eraseblock identifier in the row
54 * address
55 */
56struct nand_row_converter {
57 unsigned int lun_addr_shift;
58 unsigned int eraseblock_addr_shift;
59};
60
61/**
62 * struct nand_pos - NAND position object
63 * @target: the NAND target/die
64 * @lun: the LUN identifier
65 * @plane: the plane within the LUN
66 * @eraseblock: the eraseblock within the LUN
67 * @page: the page within the LUN
68 *
69 * These information are usually used by specific sub-layers to select the
70 * appropriate target/die and generate a row address to pass to the device.
71 */
72struct nand_pos {
73 unsigned int target;
74 unsigned int lun;
75 unsigned int plane;
76 unsigned int eraseblock;
77 unsigned int page;
78};
79
80/**
81 * struct nand_page_io_req - NAND I/O request object
82 * @pos: the position this I/O request is targeting
83 * @dataoffs: the offset within the page
84 * @datalen: number of data bytes to read from/write to this page
85 * @databuf: buffer to store data in or get data from
86 * @ooboffs: the OOB offset within the page
87 * @ooblen: the number of OOB bytes to read from/write to this page
88 * @oobbuf: buffer to store OOB data in or get OOB data from
89 *
90 * This object is used to pass per-page I/O requests to NAND sub-layers. This
91 * way all useful information are already formatted in a useful way and
92 * specific NAND layers can focus on translating these information into
93 * specific commands/operations.
94 */
95struct nand_page_io_req {
96 struct nand_pos pos;
97 unsigned int dataoffs;
98 unsigned int datalen;
99 union {
100 const void *out;
101 void *in;
102 } databuf;
103 unsigned int ooboffs;
104 unsigned int ooblen;
105 union {
106 const void *out;
107 void *in;
108 } oobbuf;
109};
110
111/**
112 * struct nand_ecc_req - NAND ECC requirements
113 * @strength: ECC strength
114 * @step_size: ECC step/block size
115 */
116struct nand_ecc_req {
117 unsigned int strength;
118 unsigned int step_size;
119};
120
121#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
122
123/**
124 * struct nand_bbt - bad block table object
125 * @cache: in memory BBT cache
126 */
127struct nand_bbt {
128 unsigned long *cache;
129};
130
131struct nand_device;
132
133/**
134 * struct nand_ops - NAND operations
135 * @erase: erase a specific block. No need to check if the block is bad before
136 * erasing, this has been taken care of by the generic NAND layer
137 * @markbad: mark a specific block bad. No need to check if the block is
138 * already marked bad, this has been taken care of by the generic
139 * NAND layer. This method should just write the BBM (Bad Block
140 * Marker) so that future call to struct_nand_ops->isbad() return
141 * true
142 * @isbad: check whether a block is bad or not. This method should just read
143 * the BBM and return whether the block is bad or not based on what it
144 * reads
145 *
146 * These are all low level operations that should be implemented by specialized
147 * NAND layers (SPI NAND, raw NAND, ...).
148 */
149struct nand_ops {
150 int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
151 int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
152 bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
153};
154
155/**
156 * struct nand_device - NAND device
157 * @mtd: MTD instance attached to the NAND device
158 * @memorg: memory layout
159 * @eccreq: ECC requirements
160 * @rowconv: position to row address converter
161 * @bbt: bad block table info
162 * @ops: NAND operations attached to the NAND device
163 *
164 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
165 * should declare their own NAND object embedding a nand_device struct (that's
166 * how inheritance is done).
167 * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
168 * at device detection time to reflect the NAND device
169 * capabilities/requirements. Once this is done nanddev_init() can be called.
170 * It will take care of converting NAND information into MTD ones, which means
171 * the specialized NAND layers should never manually tweak
172 * struct_nand_device->mtd except for the ->_read/write() hooks.
173 */
174struct nand_device {
175 struct mtd_info *mtd;
176 struct nand_memory_organization memorg;
177 struct nand_ecc_req eccreq;
178 struct nand_row_converter rowconv;
179 struct nand_bbt bbt;
180 const struct nand_ops *ops;
181};
182
183/**
184 * struct nand_io_iter - NAND I/O iterator
185 * @req: current I/O request
186 * @oobbytes_per_page: maximum number of OOB bytes per page
187 * @dataleft: remaining number of data bytes to read/write
188 * @oobleft: remaining number of OOB bytes to read/write
189 *
190 * Can be used by specialized NAND layers to iterate over all pages covered
191 * by an MTD I/O request, which should greatly simplifies the boiler-plate
192 * code needed to read/write data from/to a NAND device.
193 */
194struct nand_io_iter {
195 struct nand_page_io_req req;
196 unsigned int oobbytes_per_page;
197 unsigned int dataleft;
198 unsigned int oobleft;
199};
200
201/**
202 * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
203 * @mtd: MTD instance
204 *
205 * Return: the NAND device embedding @mtd.
206 */
207static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
208{
209 return mtd->priv;
210}
211
212/**
213 * nanddev_to_mtd() - Get the MTD device attached to a NAND device
214 * @nand: NAND device
215 *
216 * Return: the MTD device embedded in @nand.
217 */
218static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
219{
220 return nand->mtd;
221}
222
223/*
224 * nanddev_bits_per_cell() - Get the number of bits per cell
225 * @nand: NAND device
226 *
227 * Return: the number of bits per cell.
228 */
229static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
230{
231 return nand->memorg.bits_per_cell;
232}
233
234/**
235 * nanddev_page_size() - Get NAND page size
236 * @nand: NAND device
237 *
238 * Return: the page size.
239 */
240static inline size_t nanddev_page_size(const struct nand_device *nand)
241{
242 return nand->memorg.pagesize;
243}
244
245/**
246 * nanddev_per_page_oobsize() - Get NAND OOB size
247 * @nand: NAND device
248 *
249 * Return: the OOB size.
250 */
251static inline unsigned int
252nanddev_per_page_oobsize(const struct nand_device *nand)
253{
254 return nand->memorg.oobsize;
255}
256
257/**
258 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
259 * @nand: NAND device
260 *
261 * Return: the number of pages per eraseblock.
262 */
263static inline unsigned int
264nanddev_pages_per_eraseblock(const struct nand_device *nand)
265{
266 return nand->memorg.pages_per_eraseblock;
267}
268
269/**
270 * nanddev_per_page_oobsize() - Get NAND erase block size
271 * @nand: NAND device
272 *
273 * Return: the eraseblock size.
274 */
275static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
276{
277 return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
278}
279
280/**
281 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
282 * @nand: NAND device
283 *
284 * Return: the number of eraseblocks per LUN.
285 */
286static inline unsigned int
287nanddev_eraseblocks_per_lun(const struct nand_device *nand)
288{
289 return nand->memorg.eraseblocks_per_lun;
290}
291
292/**
293 * nanddev_target_size() - Get the total size provided by a single target/die
294 * @nand: NAND device
295 *
296 * Return: the total size exposed by a single target/die in bytes.
297 */
298static inline u64 nanddev_target_size(const struct nand_device *nand)
299{
300 return (u64)nand->memorg.luns_per_target *
301 nand->memorg.eraseblocks_per_lun *
302 nand->memorg.pages_per_eraseblock *
303 nand->memorg.pagesize;
304}
305
306/**
307 * nanddev_ntarget() - Get the total of targets
308 * @nand: NAND device
309 *
310 * Return: the number of targets/dies exposed by @nand.
311 */
312static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
313{
314 return nand->memorg.ntargets;
315}
316
317/**
318 * nanddev_neraseblocks() - Get the total number of erasablocks
319 * @nand: NAND device
320 *
321 * Return: the total number of eraseblocks exposed by @nand.
322 */
323static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
324{
325 return (u64)nand->memorg.luns_per_target *
326 nand->memorg.eraseblocks_per_lun *
327 nand->memorg.pages_per_eraseblock;
328}
329
330/**
331 * nanddev_size() - Get NAND size
332 * @nand: NAND device
333 *
334 * Return: the total size (in bytes) exposed by @nand.
335 */
336static inline u64 nanddev_size(const struct nand_device *nand)
337{
338 return nanddev_target_size(nand) * nanddev_ntargets(nand);
339}
340
341/**
342 * nanddev_get_memorg() - Extract memory organization info from a NAND device
343 * @nand: NAND device
344 *
345 * This can be used by the upper layer to fill the memorg info before calling
346 * nanddev_init().
347 *
348 * Return: the memorg object embedded in the NAND device.
349 */
350static inline struct nand_memory_organization *
351nanddev_get_memorg(struct nand_device *nand)
352{
353 return &nand->memorg;
354}
355
356int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
357 struct module *owner);
358void nanddev_cleanup(struct nand_device *nand);
359
360/**
361 * nanddev_register() - Register a NAND device
362 * @nand: NAND device
363 *
364 * Register a NAND device.
365 * This function is just a wrapper around mtd_device_register()
366 * registering the MTD device embedded in @nand.
367 *
368 * Return: 0 in case of success, a negative error code otherwise.
369 */
370static inline int nanddev_register(struct nand_device *nand)
371{
372 return mtd_device_register(nand->mtd, NULL, 0);
373}
374
375/**
376 * nanddev_unregister() - Unregister a NAND device
377 * @nand: NAND device
378 *
379 * Unregister a NAND device.
380 * This function is just a wrapper around mtd_device_unregister()
381 * unregistering the MTD device embedded in @nand.
382 *
383 * Return: 0 in case of success, a negative error code otherwise.
384 */
385static inline int nanddev_unregister(struct nand_device *nand)
386{
387 return mtd_device_unregister(nand->mtd);
388}
389
390/**
391 * nanddev_set_of_node() - Attach a DT node to a NAND device
392 * @nand: NAND device
393 * @np: DT node
394 *
395 * Attach a DT node to a NAND device.
396 */
397static inline void nanddev_set_of_node(struct nand_device *nand,
398 const struct device_node *np)
399{
400 mtd_set_of_node(nand->mtd, np);
401}
402
403/**
404 * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
405 * @nand: NAND device
406 *
407 * Return: the DT node attached to @nand.
408 */
409static inline const struct device_node *nanddev_get_of_node(struct nand_device *nand)
410{
411 return mtd_get_of_node(nand->mtd);
412}
413
414/**
415 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
416 * @nand: NAND device
417 * @offs: absolute NAND offset (usually passed by the MTD layer)
418 * @pos: a NAND position object to fill in
419 *
420 * Converts @offs into a nand_pos representation.
421 *
422 * Return: the offset within the NAND page pointed by @pos.
423 */
424static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
425 loff_t offs,
426 struct nand_pos *pos)
427{
428 unsigned int pageoffs;
429 u64 tmp = offs;
430
431 pageoffs = do_div(tmp, nand->memorg.pagesize);
432 pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
433 pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
434 pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
435 pos->lun = do_div(tmp, nand->memorg.luns_per_target);
436 pos->target = tmp;
437
438 return pageoffs;
439}
440
441/**
442 * nanddev_pos_cmp() - Compare two NAND positions
443 * @a: First NAND position
444 * @b: Second NAND position
445 *
446 * Compares two NAND positions.
447 *
448 * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
449 */
450static inline int nanddev_pos_cmp(const struct nand_pos *a,
451 const struct nand_pos *b)
452{
453 if (a->target != b->target)
454 return a->target < b->target ? -1 : 1;
455
456 if (a->lun != b->lun)
457 return a->lun < b->lun ? -1 : 1;
458
459 if (a->eraseblock != b->eraseblock)
460 return a->eraseblock < b->eraseblock ? -1 : 1;
461
462 if (a->page != b->page)
463 return a->page < b->page ? -1 : 1;
464
465 return 0;
466}
467
468/**
469 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
470 * @nand: NAND device
471 * @pos: the NAND position to convert
472 *
473 * Converts @pos NAND position into an absolute offset.
474 *
475 * Return: the absolute offset. Note that @pos points to the beginning of a
476 * page, if one wants to point to a specific offset within this page
477 * the returned offset has to be adjusted manually.
478 */
479static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
480 const struct nand_pos *pos)
481{
482 unsigned int npages;
483
484 npages = pos->page +
485 ((pos->eraseblock +
486 (pos->lun +
487 (pos->target * nand->memorg.luns_per_target)) *
488 nand->memorg.eraseblocks_per_lun) *
489 nand->memorg.pages_per_eraseblock);
490
491 return (loff_t)npages * nand->memorg.pagesize;
492}
493
494/**
495 * nanddev_pos_to_row() - Extract a row address from a NAND position
496 * @nand: NAND device
497 * @pos: the position to convert
498 *
499 * Converts a NAND position into a row address that can then be passed to the
500 * device.
501 *
502 * Return: the row address extracted from @pos.
503 */
504static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
505 const struct nand_pos *pos)
506{
507 return (pos->lun << nand->rowconv.lun_addr_shift) |
508 (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
509 pos->page;
510}
511
512/**
513 * nanddev_pos_next_target() - Move a position to the next target/die
514 * @nand: NAND device
515 * @pos: the position to update
516 *
517 * Updates @pos to point to the start of the next target/die. Useful when you
518 * want to iterate over all targets/dies of a NAND device.
519 */
520static inline void nanddev_pos_next_target(struct nand_device *nand,
521 struct nand_pos *pos)
522{
523 pos->page = 0;
524 pos->plane = 0;
525 pos->eraseblock = 0;
526 pos->lun = 0;
527 pos->target++;
528}
529
530/**
531 * nanddev_pos_next_lun() - Move a position to the next LUN
532 * @nand: NAND device
533 * @pos: the position to update
534 *
535 * Updates @pos to point to the start of the next LUN. Useful when you want to
536 * iterate over all LUNs of a NAND device.
537 */
538static inline void nanddev_pos_next_lun(struct nand_device *nand,
539 struct nand_pos *pos)
540{
541 if (pos->lun >= nand->memorg.luns_per_target - 1)
542 return nanddev_pos_next_target(nand, pos);
543
544 pos->lun++;
545 pos->page = 0;
546 pos->plane = 0;
547 pos->eraseblock = 0;
548}
549
550/**
551 * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
552 * @nand: NAND device
553 * @pos: the position to update
554 *
555 * Updates @pos to point to the start of the next eraseblock. Useful when you
556 * want to iterate over all eraseblocks of a NAND device.
557 */
558static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
559 struct nand_pos *pos)
560{
561 if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
562 return nanddev_pos_next_lun(nand, pos);
563
564 pos->eraseblock++;
565 pos->page = 0;
566 pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
567}
568
569/**
570 * nanddev_pos_next_eraseblock() - Move a position to the next page
571 * @nand: NAND device
572 * @pos: the position to update
573 *
574 * Updates @pos to point to the start of the next page. Useful when you want to
575 * iterate over all pages of a NAND device.
576 */
577static inline void nanddev_pos_next_page(struct nand_device *nand,
578 struct nand_pos *pos)
579{
580 if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
581 return nanddev_pos_next_eraseblock(nand, pos);
582
583 pos->page++;
584}
585
586/**
587 * nand_io_iter_init - Initialize a NAND I/O iterator
588 * @nand: NAND device
589 * @offs: absolute offset
590 * @req: MTD request
591 * @iter: NAND I/O iterator
592 *
593 * Initializes a NAND iterator based on the information passed by the MTD
594 * layer.
595 */
596static inline void nanddev_io_iter_init(struct nand_device *nand,
597 loff_t offs, struct mtd_oob_ops *req,
598 struct nand_io_iter *iter)
599{
600 struct mtd_info *mtd = nanddev_to_mtd(nand);
601
602 iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
603 iter->req.ooboffs = req->ooboffs;
604 iter->oobbytes_per_page = mtd_oobavail(mtd, req);
605 iter->dataleft = req->len;
606 iter->oobleft = req->ooblen;
607 iter->req.databuf.in = req->datbuf;
608 iter->req.datalen = min_t(unsigned int,
609 nand->memorg.pagesize - iter->req.dataoffs,
610 iter->dataleft);
611 iter->req.oobbuf.in = req->oobbuf;
612 iter->req.ooblen = min_t(unsigned int,
613 iter->oobbytes_per_page - iter->req.ooboffs,
614 iter->oobleft);
615}
616
617/**
618 * nand_io_iter_next_page - Move to the next page
619 * @nand: NAND device
620 * @iter: NAND I/O iterator
621 *
622 * Updates the @iter to point to the next page.
623 */
624static inline void nanddev_io_iter_next_page(struct nand_device *nand,
625 struct nand_io_iter *iter)
626{
627 nanddev_pos_next_page(nand, &iter->req.pos);
628 iter->dataleft -= iter->req.datalen;
629 iter->req.databuf.in += iter->req.datalen;
630 iter->oobleft -= iter->req.ooblen;
631 iter->req.oobbuf.in += iter->req.ooblen;
632 iter->req.dataoffs = 0;
633 iter->req.ooboffs = 0;
634 iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
635 iter->dataleft);
636 iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
637 iter->oobleft);
638}
639
640/**
641 * nand_io_iter_end - Should end iteration or not
642 * @nand: NAND device
643 * @iter: NAND I/O iterator
644 *
645 * Check whether @iter has reached the end of the NAND portion it was asked to
646 * iterate on or not.
647 *
648 * Return: true if @iter has reached the end of the iteration request, false
649 * otherwise.
650 */
651static inline bool nanddev_io_iter_end(struct nand_device *nand,
652 const struct nand_io_iter *iter)
653{
654 if (iter->dataleft || iter->oobleft)
655 return false;
656
657 return true;
658}
659
660/**
661 * nand_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O
662 * request
663 * @nand: NAND device
664 * @start: start address to read/write from
665 * @req: MTD I/O request
666 * @iter: NAND I/O iterator
667 *
668 * Should be used for iterate over pages that are contained in an MTD request.
669 */
670#define nanddev_io_for_each_page(nand, start, req, iter) \
671 for (nanddev_io_iter_init(nand, start, req, iter); \
672 !nanddev_io_iter_end(nand, iter); \
673 nanddev_io_iter_next_page(nand, iter))
674
675bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
676bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
677int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
678int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
679
680/* BBT related functions */
681enum nand_bbt_block_status {
682 NAND_BBT_BLOCK_STATUS_UNKNOWN,
683 NAND_BBT_BLOCK_GOOD,
684 NAND_BBT_BLOCK_WORN,
685 NAND_BBT_BLOCK_RESERVED,
686 NAND_BBT_BLOCK_FACTORY_BAD,
687 NAND_BBT_BLOCK_NUM_STATUS,
688};
689
690int nanddev_bbt_init(struct nand_device *nand);
691void nanddev_bbt_cleanup(struct nand_device *nand);
692int nanddev_bbt_update(struct nand_device *nand);
693int nanddev_bbt_get_block_status(const struct nand_device *nand,
694 unsigned int entry);
695int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
696 enum nand_bbt_block_status status);
697int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
698
699/**
700 * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
701 * @nand: NAND device
702 * @pos: the NAND position we want to get BBT entry for
703 *
704 * Return the BBT entry used to store information about the eraseblock pointed
705 * by @pos.
706 *
707 * Return: the BBT entry storing information about eraseblock pointed by @pos.
708 */
709static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
710 const struct nand_pos *pos)
711{
712 return pos->eraseblock +
713 ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
714 nand->memorg.eraseblocks_per_lun);
715}
716
717/**
718 * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
719 * @nand: NAND device
720 *
721 * Return: true if the BBT has been initialized, false otherwise.
722 */
723static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
724{
725 return !!nand->bbt.cache;
726}
727
728/* MTD -> NAND helper functions. */
729int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
730
731#endif /* __LINUX_MTD_NAND_H */