// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#define pr_fmt(fmt)	"nand: " fmt

#include <common.h>
#include <watchdog.h>
#ifndef __UBOOT__
#include <linux/compat.h>
#include <linux/module.h>
#endif
#include <linux/bitops.h>
#include <linux/mtd/nand.h>

21/**
22 * nanddev_isbad() - Check if a block is bad
23 * @nand: NAND device
24 * @pos: position pointing to the block we want to check
25 *
26 * Return: true if the block is bad, false otherwise.
27 */
28bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
29{
30 if (nanddev_bbt_is_initialized(nand)) {
31 unsigned int entry;
32 int status;
33
34 entry = nanddev_bbt_pos_to_entry(nand, pos);
35 status = nanddev_bbt_get_block_status(nand, entry);
36 /* Lazy block status retrieval */
37 if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
38 if (nand->ops->isbad(nand, pos))
39 status = NAND_BBT_BLOCK_FACTORY_BAD;
40 else
41 status = NAND_BBT_BLOCK_GOOD;
42
43 nanddev_bbt_set_block_status(nand, entry, status);
44 }
45
46 if (status == NAND_BBT_BLOCK_WORN ||
47 status == NAND_BBT_BLOCK_FACTORY_BAD)
48 return true;
49
50 return false;
51 }
52
53 return nand->ops->isbad(nand, pos);
54}
55EXPORT_SYMBOL_GPL(nanddev_isbad);
56
57/**
58 * nanddev_markbad() - Mark a block as bad
59 * @nand: NAND device
60 * @pos: position of the block to mark bad
61 *
62 * Mark a block bad. This function is updating the BBT if available and
63 * calls the low-level markbad hook (nand->ops->markbad()).
64 *
65 * Return: 0 in case of success, a negative error code otherwise.
66 */
67int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
68{
69 struct mtd_info *mtd = nanddev_to_mtd(nand);
70 unsigned int entry;
71 int ret = 0;
72
73 if (nanddev_isbad(nand, pos))
74 return 0;
75
76 ret = nand->ops->markbad(nand, pos);
77 if (ret)
78 pr_warn("failed to write BBM to block @%llx (err = %d)\n",
79 nanddev_pos_to_offs(nand, pos), ret);
80
81 if (!nanddev_bbt_is_initialized(nand))
82 goto out;
83
84 entry = nanddev_bbt_pos_to_entry(nand, pos);
85 ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
86 if (ret)
87 goto out;
88
89 ret = nanddev_bbt_update(nand);
90
91out:
92 if (!ret)
93 mtd->ecc_stats.badblocks++;
94
95 return ret;
96}
97EXPORT_SYMBOL_GPL(nanddev_markbad);
98
99/**
100 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
101 * @nand: NAND device
102 * @pos: NAND position to test
103 *
104 * Checks whether the eraseblock pointed by @pos is reserved or not.
105 *
106 * Return: true if the eraseblock is reserved, false otherwise.
107 */
108bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
109{
110 unsigned int entry;
111 int status;
112
113 if (!nanddev_bbt_is_initialized(nand))
114 return false;
115
116 /* Return info from the table */
117 entry = nanddev_bbt_pos_to_entry(nand, pos);
118 status = nanddev_bbt_get_block_status(nand, entry);
119 return status == NAND_BBT_BLOCK_RESERVED;
120}
121EXPORT_SYMBOL_GPL(nanddev_isreserved);
122
123/**
124 * nanddev_erase() - Erase a NAND portion
125 * @nand: NAND device
126 * @pos: position of the block to erase
127 *
128 * Erases the block if it's not bad.
129 *
130 * Return: 0 in case of success, a negative error code otherwise.
131 */
132int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
133{
Mikhail Kshevetskiy9ad25432020-06-22 16:16:34 +0300134 unsigned int entry;
135
Boris Brezillon894380f2018-08-16 17:30:09 +0200136 if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
137 pr_warn("attempt to erase a bad/reserved block @%llx\n",
138 nanddev_pos_to_offs(nand, pos));
Mikhail Kshevetskiy9ad25432020-06-22 16:16:34 +0300139 if (nanddev_isreserved(nand, pos))
140 return -EIO;
141
142 /* remove bad block from BBT */
143 entry = nanddev_bbt_pos_to_entry(nand, pos);
144 nanddev_bbt_set_block_status(nand, entry,
145 NAND_BBT_BLOCK_STATUS_UNKNOWN);
Boris Brezillon894380f2018-08-16 17:30:09 +0200146 }
147
148 return nand->ops->erase(nand, pos);
149}
150EXPORT_SYMBOL_GPL(nanddev_erase);
151
152/**
153 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
154 * @mtd: MTD device
155 * @einfo: erase request
156 *
157 * This is a simple mtd->_erase() implementation iterating over all blocks
158 * concerned by @einfo and calling nand->ops->erase() on each of them.
159 *
160 * Note that mtd->_erase should not be directly assigned to this helper,
161 * because there's no locking here. NAND specialized layers should instead
162 * implement there own wrapper around nanddev_mtd_erase() taking the
163 * appropriate lock before calling nanddev_mtd_erase().
164 *
165 * Return: 0 in case of success, a negative error code otherwise.
166 */
167int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
168{
169 struct nand_device *nand = mtd_to_nanddev(mtd);
170 struct nand_pos pos, last;
171 int ret;
172
173 nanddev_offs_to_pos(nand, einfo->addr, &pos);
174 nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
175 while (nanddev_pos_cmp(&pos, &last) <= 0) {
Patrice Chotardf2296662021-01-20 14:42:03 +0100176 WATCHDOG_RESET();
Boris Brezillon894380f2018-08-16 17:30:09 +0200177 ret = nanddev_erase(nand, &pos);
178 if (ret) {
179 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
180
181 return ret;
182 }
183
184 nanddev_pos_next_eraseblock(nand, &pos);
185 }
186
187 return 0;
188}
189EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
190
191/**
192 * nanddev_init() - Initialize a NAND device
193 * @nand: NAND device
194 * @ops: NAND device operations
195 * @owner: NAND device owner
196 *
197 * Initializes a NAND device object. Consistency checks are done on @ops and
198 * @nand->memorg. Also takes care of initializing the BBT.
199 *
200 * Return: 0 in case of success, a negative error code otherwise.
201 */
202int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
203 struct module *owner)
204{
205 struct mtd_info *mtd = nanddev_to_mtd(nand);
206 struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
207
208 if (!nand || !ops)
209 return -EINVAL;
210
211 if (!ops->erase || !ops->markbad || !ops->isbad)
212 return -EINVAL;
213
214 if (!memorg->bits_per_cell || !memorg->pagesize ||
215 !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
216 !memorg->planes_per_lun || !memorg->luns_per_target ||
217 !memorg->ntargets)
218 return -EINVAL;
219
220 nand->rowconv.eraseblock_addr_shift =
221 fls(memorg->pages_per_eraseblock - 1);
222 nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
223 nand->rowconv.eraseblock_addr_shift;
224
225 nand->ops = ops;
226
227 mtd->type = memorg->bits_per_cell == 1 ?
228 MTD_NANDFLASH : MTD_MLCNANDFLASH;
229 mtd->flags = MTD_CAP_NANDFLASH;
230 mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
231 mtd->writesize = memorg->pagesize;
232 mtd->writebufsize = memorg->pagesize;
233 mtd->oobsize = memorg->oobsize;
234 mtd->size = nanddev_size(nand);
235 mtd->owner = owner;
236
237 return nanddev_bbt_init(nand);
238}
239EXPORT_SYMBOL_GPL(nanddev_init);
240
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init(): tears down the
 * BBT if (and only if) one was set up.
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (!nanddev_bbt_is_initialized(nand))
		return;

	nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);
253
254MODULE_DESCRIPTION("Generic NAND framework");
255MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
256MODULE_LICENSE("GPL v2");