// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#ifndef __UBOOT__
#include <linux/module.h>
#endif
#include <linux/mtd/nand.h>

/**
 * nanddev_isbad() - Check if a block is bad
 * @nand: NAND device
 * @pos: position pointing to the block we want to check
 *
 * Return: true if the block is bad, false otherwise.
 */
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_bbt_is_initialized(nand)) {
		unsigned int entry;
		int status;

		entry = nanddev_bbt_pos_to_entry(nand, pos);
		status = nanddev_bbt_get_block_status(nand, entry);
		/* Lazy block status retrieval */
		if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
			if (nand->ops->isbad(nand, pos))
				status = NAND_BBT_BLOCK_FACTORY_BAD;
			else
				status = NAND_BBT_BLOCK_GOOD;

			nanddev_bbt_set_block_status(nand, entry, status);
		}

		if (status == NAND_BBT_BLOCK_WORN ||
		    status == NAND_BBT_BLOCK_FACTORY_BAD)
			return true;

		return false;
	}

	return nand->ops->isbad(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_isbad);

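/*
 * Illustrative sketch (not part of this file) of how a specialized layer
 * typically exposes nanddev_isbad() through its mtd->_block_isbad() hook:
 * convert the absolute offset into a nand_pos, then query the framework.
 * The my_nand_* name is hypothetical; mtd_to_nanddev() and
 * nanddev_offs_to_pos() are the framework helpers used elsewhere in this
 * file.
 *
 *	static int my_nand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
 *	{
 *		struct nand_device *nand = mtd_to_nanddev(mtd);
 *		struct nand_pos pos;
 *
 *		nanddev_offs_to_pos(nand, offs, &pos);
 *		return nanddev_isbad(nand, &pos);
 *	}
 */
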
/**
 * nanddev_markbad() - Mark a block as bad
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Mark a block bad. This function updates the BBT if available and calls the
 * low-level markbad hook (nand->ops->markbad()).
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	if (nanddev_isbad(nand, pos))
		return 0;

	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = nanddev_bbt_update(nand);

out:
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);

/**
 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
 * @nand: NAND device
 * @pos: NAND position to test
 *
 * Checks whether the eraseblock pointed to by @pos is reserved or not.
 *
 * Return: true if the eraseblock is reserved, false otherwise.
 */
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
{
	unsigned int entry;
	int status;

	if (!nanddev_bbt_is_initialized(nand))
		return false;

	/* Return info from the table */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	status = nanddev_bbt_get_block_status(nand, entry);
	return status == NAND_BBT_BLOCK_RESERVED;
}
EXPORT_SYMBOL_GPL(nanddev_isreserved);

/**
 * nanddev_erase() - Erase a NAND portion
 * @nand: NAND device
 * @pos: position of the block to erase
 *
 * Erases the block pointed to by @pos unless it is bad or reserved.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
		pr_warn("attempt to erase a bad/reserved block @%llx\n",
			nanddev_pos_to_offs(nand, pos));
		return -EIO;
	}

	return nand->ops->erase(nand, pos);
}
EXPORT_SYMBOL_GPL(nanddev_erase);

/**
 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
 * @mtd: MTD device
 * @einfo: erase request
 *
 * This is a simple mtd->_erase() implementation iterating over all blocks
 * covered by @einfo and calling nand->ops->erase() on each of them.
 *
 * Note that mtd->_erase should not be directly assigned to this helper,
 * because there's no locking here. Specialized NAND layers should instead
 * implement their own wrapper around nanddev_mtd_erase() taking the
 * appropriate lock before calling nanddev_mtd_erase().
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos, last;
	int ret;

	nanddev_offs_to_pos(nand, einfo->addr, &pos);
	nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
	while (nanddev_pos_cmp(&pos, &last) <= 0) {
		ret = nanddev_erase(nand, &pos);
		if (ret) {
			einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);

			return ret;
		}

		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nanddev_mtd_erase);

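/*
 * Illustrative sketch of the wrapper described above: a specialized layer
 * takes its own lock around nanddev_mtd_erase() before exposing it through
 * mtd->_erase. The my_nand structure, my_mtd_to_priv() helper and the lock
 * field are hypothetical; only nanddev_mtd_erase() comes from this
 * framework.
 *
 *	static int my_nand_mtd_erase(struct mtd_info *mtd,
 *				     struct erase_info *einfo)
 *	{
 *		struct my_nand *priv = my_mtd_to_priv(mtd);
 *		int ret;
 *
 *		mutex_lock(&priv->lock);
 *		ret = nanddev_mtd_erase(mtd, einfo);
 *		mutex_unlock(&priv->lock);
 *
 *		return ret;
 *	}
 */
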
/**
 * nanddev_init() - Initialize a NAND device
 * @nand: NAND device
 * @ops: NAND device operations
 * @owner: NAND device owner
 *
 * Initializes a NAND device object. Consistency checks are done on @ops and
 * @nand->memorg. Also takes care of initializing the BBT.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

	if (!nand || !ops)
		return -EINVAL;

	if (!ops->erase || !ops->markbad || !ops->isbad)
		return -EINVAL;

	if (!memorg->bits_per_cell || !memorg->pagesize ||
	    !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
	    !memorg->planes_per_lun || !memorg->luns_per_target ||
	    !memorg->ntargets)
		return -EINVAL;

	nand->rowconv.eraseblock_addr_shift =
			fls(memorg->pages_per_eraseblock - 1);
	nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
				       nand->rowconv.eraseblock_addr_shift;

	nand->ops = ops;

	mtd->type = memorg->bits_per_cell == 1 ?
		    MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
	mtd->writesize = memorg->pagesize;
	mtd->writebufsize = memorg->pagesize;
	mtd->oobsize = memorg->oobsize;
	mtd->size = nanddev_size(nand);
	mtd->owner = owner;

	return nanddev_bbt_init(nand);
}
EXPORT_SYMBOL_GPL(nanddev_init);

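/*
 * Illustrative sketch of how a driver is expected to use nanddev_init():
 * fill in nand->memorg, provide erase/markbad/isbad hooks and then pass the
 * ops table to nanddev_init(). The my_* names and the geometry values are
 * hypothetical; the memorg fields and nand_ops hooks are the ones checked
 * above.
 *
 *	static const struct nand_ops my_nand_ops = {
 *		.erase = my_nand_erase,
 *		.markbad = my_nand_markbad,
 *		.isbad = my_nand_isbad,
 *	};
 *
 *	static int my_nand_probe(struct nand_device *nand)
 *	{
 *		struct nand_memory_organization *memorg =
 *						nanddev_get_memorg(nand);
 *
 *		memorg->bits_per_cell = 1;
 *		memorg->pagesize = 2048;
 *		memorg->oobsize = 64;
 *		memorg->pages_per_eraseblock = 64;
 *		memorg->eraseblocks_per_lun = 1024;
 *		memorg->planes_per_lun = 1;
 *		memorg->luns_per_target = 1;
 *		memorg->ntargets = 1;
 *
 *		return nanddev_init(nand, &my_nand_ops, THIS_MODULE);
 *	}
 */
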
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);

MODULE_DESCRIPTION("Generic NAND framework");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_LICENSE("GPL v2");