/*
 * Copyright (c) 2019-2023, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/spi_nand.h>
#include <lib/utils.h>

#include <platform_def.h>

#define SPI_NAND_MAX_ID_LEN	4U
#define DELAY_US_400MS		400000U

static struct spinand_device spinand_dev;

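/*
 * Weak stub: platforms are expected to override plat_get_spi_nand_data()
 * to fill in the device geometry (page, block and total size), the plane
 * count and feature flags, and to tune spi_read_cache_op (for instance
 * requesting a 4-line data phase) before the probe sequence runs.
 */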
#pragma weak plat_get_spi_nand_data
int plat_get_spi_nand_data(struct spinand_device *device)
{
	return 0;
}

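/*
 * Single-byte register access built on the standard Get Feature (0x0F) /
 * Set Feature (0x1F) commands: one opcode byte, one feature-address byte
 * and one data byte, all transferred on a single line.
 */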
static int spi_nand_reg(bool read_reg, uint8_t reg, uint8_t *val,
			enum spi_mem_data_dir dir)
{
	struct spi_mem_op op;

	zeromem(&op, sizeof(struct spi_mem_op));
	if (read_reg) {
		op.cmd.opcode = SPI_NAND_OP_GET_FEATURE;
	} else {
		op.cmd.opcode = SPI_NAND_OP_SET_FEATURE;
	}

	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.addr.val = reg;
	op.addr.nbytes = 1U;
	op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.data.dir = dir;
	op.data.nbytes = 1U;
	op.data.buf = val;

	return spi_mem_exec_op(&op);
}

static int spi_nand_read_reg(uint8_t reg, uint8_t *val)
{
	return spi_nand_reg(true, reg, val, SPI_MEM_DATA_IN);
}

static int spi_nand_write_reg(uint8_t reg, uint8_t val)
{
	return spi_nand_reg(false, reg, &val, SPI_MEM_DATA_OUT);
}

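/*
 * Read-modify-write of the configuration register, mirrored in
 * spinand_dev.cfg_cache so a Set Feature transaction is only issued
 * when a bit actually changes.
 */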
static int spi_nand_update_cfg(uint8_t mask, uint8_t val)
{
	int ret;
	uint8_t cfg = spinand_dev.cfg_cache;

	cfg &= ~mask;
	cfg |= val;

	if (cfg == spinand_dev.cfg_cache) {
		return 0;
	}

	ret = spi_nand_write_reg(SPI_NAND_REG_CFG, cfg);
	if (ret == 0) {
		spinand_dev.cfg_cache = cfg;
	}

	return ret;
}

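/* Toggle the on-die ECC engine through the configuration register. */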
static int spi_nand_ecc_enable(bool enable)
{
	return spi_nand_update_cfg(SPI_NAND_CFG_ECC_EN,
				   enable ? SPI_NAND_CFG_ECC_EN : 0U);
}

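/*
 * Program the Quad Enable (QE) bit on parts that advertise it via
 * SPI_NAND_HAS_QE_BIT: set when the platform requested a 4-line data
 * phase for read-from-cache, cleared otherwise. The manufacturer ID
 * parameter is currently unused.
 */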
static int spi_nand_quad_enable(uint8_t manufacturer_id)
{
	bool enable = false;

	if ((spinand_dev.flags & SPI_NAND_HAS_QE_BIT) == 0U) {
		return 0;
	}

	if (spinand_dev.spi_read_cache_op.data.buswidth ==
	    SPI_MEM_BUSWIDTH_4_LINE) {
		enable = true;
	}

	return spi_nand_update_cfg(SPI_NAND_CFG_QE,
				   enable ? SPI_NAND_CFG_QE : 0U);
}

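/*
 * Poll the status register until the BUSY bit clears, bounded by a
 * 400 ms timeout. The last status byte read is handed back so callers
 * can inspect the ECC bits after a page read.
 */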
static int spi_nand_wait_ready(uint8_t *status)
{
	int ret;
	uint64_t timeout = timeout_init_us(DELAY_US_400MS);

	while (!timeout_elapsed(timeout)) {
		ret = spi_nand_read_reg(SPI_NAND_REG_STATUS, status);
		if (ret != 0) {
			return ret;
		}

		VERBOSE("%s Status %x\n", __func__, *status);
		if ((*status & SPI_NAND_STATUS_BUSY) == 0U) {
			return 0;
		}
	}

	return -ETIMEDOUT;
}

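/* Issue a device RESET and wait for the part to become ready again. */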
static int spi_nand_reset(void)
{
	struct spi_mem_op op;
	uint8_t status;
	int ret;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.opcode = SPI_NAND_OP_RESET;
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	ret = spi_mem_exec_op(&op);
	if (ret != 0) {
		return ret;
	}

	return spi_nand_wait_ready(&status);
}

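/* READ ID: fetch up to SPI_NAND_MAX_ID_LEN identification bytes. */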
static int spi_nand_read_id(uint8_t *id)
{
	struct spi_mem_op op;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.opcode = SPI_NAND_OP_READ_ID;
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.data.dir = SPI_MEM_DATA_IN;
	op.data.nbytes = SPI_NAND_MAX_ID_LEN;
	op.data.buf = id;
	op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	return spi_mem_exec_op(&op);
}

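/*
 * PAGE READ: copy one page from the NAND array into the device's
 * internal cache. The 24-bit address carries the row (page) address,
 * composed here from a block number and a page offset within the block.
 */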
static int spi_nand_load_page(unsigned int page)
{
	struct spi_mem_op op;
	uint32_t block_nb = page / spinand_dev.nand_dev->block_size;
	uint32_t page_nb = page - (block_nb * spinand_dev.nand_dev->page_size);
	uint32_t nbpages_per_block = spinand_dev.nand_dev->block_size /
				     spinand_dev.nand_dev->page_size;
	uint32_t block_sh = __builtin_ctz(nbpages_per_block) + 1U;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.opcode = SPI_NAND_OP_LOAD_PAGE;
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.addr.val = (block_nb << block_sh) | page_nb;
	op.addr.nbytes = 3U;
	op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	return spi_mem_exec_op(&op);
}

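/*
 * READ FROM CACHE: stream @len bytes of the cached page starting at
 * column @offset. On multi-plane devices, odd-numbered blocks sit on the
 * second plane, selected by the address bit just above the column
 * address (1U << page_sh).
 */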
static int spi_nand_read_from_cache(unsigned int page, unsigned int offset,
				    uint8_t *buffer, unsigned int len)
{
	uint32_t nbpages_per_block = spinand_dev.nand_dev->block_size /
				     spinand_dev.nand_dev->page_size;
	uint32_t block_nb = page / nbpages_per_block;
	uint32_t page_sh = __builtin_ctz(spinand_dev.nand_dev->page_size) + 1U;

	spinand_dev.spi_read_cache_op.addr.val = offset;

	if ((spinand_dev.nand_dev->nb_planes > 1U) && ((block_nb % 2U) == 1U)) {
		spinand_dev.spi_read_cache_op.addr.val |= 1U << page_sh;
	}

	spinand_dev.spi_read_cache_op.data.buf = buffer;
	spinand_dev.spi_read_cache_op.data.nbytes = len;

	return spi_mem_exec_op(&spinand_dev.spi_read_cache_op);
}

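/*
 * Full page read sequence: program the ECC enable bit as requested, load
 * the page into the device cache, wait for the array-to-cache transfer
 * to finish, read the data out, then report -EBADMSG if the status byte
 * flags an uncorrectable ECC error.
 */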
static int spi_nand_read_page(unsigned int page, unsigned int offset,
			      uint8_t *buffer, unsigned int len,
			      bool ecc_enabled)
{
	uint8_t status;
	int ret;

	ret = spi_nand_ecc_enable(ecc_enabled);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_load_page(page);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_wait_ready(&status);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_from_cache(page, offset, buffer, len);
	if (ret != 0) {
		return ret;
	}

	if (ecc_enabled && ((status & SPI_NAND_STATUS_ECC_UNCOR) != 0U)) {
		return -EBADMSG;
	}

	return 0;
}

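/*
 * Bad block check following the usual factory marker convention: read
 * the first two spare-area bytes of the block's first page (column
 * offset page_size, ECC disabled). Any value other than 0xFF 0xFF marks
 * the block as bad.
 */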
static int spi_nand_mtd_block_is_bad(unsigned int block)
{
	unsigned int nbpages_per_block = spinand_dev.nand_dev->block_size /
					 spinand_dev.nand_dev->page_size;
	uint8_t bbm_marker[2];
	int ret;

	ret = spi_nand_read_page(block * nbpages_per_block,
				 spinand_dev.nand_dev->page_size,
				 bbm_marker, sizeof(bbm_marker), false);
	if (ret != 0) {
		return ret;
	}

	if ((bbm_marker[0] != GENMASK_32(7, 0)) ||
	    (bbm_marker[1] != GENMASK_32(7, 0))) {
		WARN("Block %u is bad\n", block);
		return 1;
	}

	return 0;
}

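/* nand_device hook: whole-page read with on-die ECC correction enabled. */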
static int spi_nand_mtd_read_page(struct nand_device *nand, unsigned int page,
				  uintptr_t buffer)
{
	return spi_nand_read_page(page, 0, (uint8_t *)buffer,
				  spinand_dev.nand_dev->page_size, true);
}

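/*
 * Probe entry point: install the MTD callbacks, set up the default
 * 1-line read-from-cache template, let the platform override geometry
 * and bus widths, then reset the device, read its ID, cache the
 * configuration register and program the QE bit. A sketch of a typical
 * caller (names here are illustrative, not part of this driver):
 *
 *	unsigned long long size;
 *	unsigned int erase_size;
 *
 *	if (spi_nand_init(&size, &erase_size) != 0) {
 *		panic();
 *	}
 */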
int spi_nand_init(unsigned long long *size, unsigned int *erase_size)
{
	uint8_t id[SPI_NAND_MAX_ID_LEN];
	int ret;

	spinand_dev.nand_dev = get_nand_device();
	if (spinand_dev.nand_dev == NULL) {
		return -EINVAL;
	}

	spinand_dev.nand_dev->mtd_block_is_bad = spi_nand_mtd_block_is_bad;
	spinand_dev.nand_dev->mtd_read_page = spi_nand_mtd_read_page;
	spinand_dev.nand_dev->nb_planes = 1;

	spinand_dev.spi_read_cache_op.cmd.opcode = SPI_NAND_OP_READ_FROM_CACHE;
	spinand_dev.spi_read_cache_op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.addr.nbytes = 2U;
	spinand_dev.spi_read_cache_op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.dummy.nbytes = 1U;
	spinand_dev.spi_read_cache_op.dummy.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	if (plat_get_spi_nand_data(&spinand_dev) != 0) {
		return -EINVAL;
	}

	assert((spinand_dev.nand_dev->page_size != 0U) &&
	       (spinand_dev.nand_dev->block_size != 0U) &&
	       (spinand_dev.nand_dev->size != 0U));

	ret = spi_nand_reset();
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_id(id);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_reg(SPI_NAND_REG_CFG, &spinand_dev.cfg_cache);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_quad_enable(id[1]);
	if (ret != 0) {
		return ret;
	}

	VERBOSE("SPI_NAND Detected ID 0x%x\n", id[1]);

	VERBOSE("Page size %u, Block size %u, size %llu\n",
		spinand_dev.nand_dev->page_size,
		spinand_dev.nand_dev->block_size,
		spinand_dev.nand_dev->size);

	*size = spinand_dev.nand_dev->size;
	*erase_size = spinand_dev.nand_dev->block_size;

	return 0;
}