/*
 * Copyright (c) 2019-2022, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/spi_nand.h>
#include <lib/utils.h>

#include <platform_def.h>

#define SPI_NAND_MAX_ID_LEN	4U
#define DELAY_US_400MS		400000U
#define MACRONIX_ID		0xC2U

static struct spinand_device spinand_dev;

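/*
 * Default platform hook: platforms are expected to override this weak
 * function to fill spinand_dev (read-from-cache op buswidths, nb_planes
 * and the nand_dev geometry) before the probe sequence in
 * spi_nand_init() runs.
 */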
#pragma weak plat_get_spi_nand_data
int plat_get_spi_nand_data(struct spinand_device *device)
{
	return 0;
}

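/*
 * Build and execute a GET FEATURE (0Fh) or SET FEATURE (1Fh) operation:
 * one opcode byte, one register-address byte and one data byte, all on a
 * single SPI line. 'dir' selects whether the data phase reads or writes
 * the register.
 */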
static int spi_nand_reg(bool read_reg, uint8_t reg, uint8_t *val,
			enum spi_mem_data_dir dir)
{
	struct spi_mem_op op;

	zeromem(&op, sizeof(struct spi_mem_op));
	if (read_reg) {
		op.cmd.opcode = SPI_NAND_OP_GET_FEATURE;
	} else {
		op.cmd.opcode = SPI_NAND_OP_SET_FEATURE;
	}

	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.addr.val = reg;
	op.addr.nbytes = 1U;
	op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.data.dir = dir;
	op.data.nbytes = 1U;
	op.data.buf = val;

	return spi_mem_exec_op(&op);
}

static int spi_nand_read_reg(uint8_t reg, uint8_t *val)
{
	return spi_nand_reg(true, reg, val, SPI_MEM_DATA_IN);
}

static int spi_nand_write_reg(uint8_t reg, uint8_t val)
{
	return spi_nand_reg(false, reg, &val, SPI_MEM_DATA_OUT);
}

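/*
 * Read-modify-write of the configuration register, served from
 * cfg_cache so that no bus access is made when the value is unchanged;
 * the cache is only updated once the device write has succeeded.
 */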
static int spi_nand_update_cfg(uint8_t mask, uint8_t val)
{
	int ret;
	uint8_t cfg = spinand_dev.cfg_cache;

	cfg &= ~mask;
	cfg |= val;

	if (cfg == spinand_dev.cfg_cache) {
		return 0;
	}

	ret = spi_nand_write_reg(SPI_NAND_REG_CFG, cfg);
	if (ret == 0) {
		spinand_dev.cfg_cache = cfg;
	}

	return ret;
}

static int spi_nand_ecc_enable(bool enable)
{
	return spi_nand_update_cfg(SPI_NAND_CFG_ECC_EN,
				   enable ? SPI_NAND_CFG_ECC_EN : 0U);
}

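/*
 * Macronix parts gate x4 I/O behind the QE bit of the configuration
 * register, so QE is set only when the platform requested a 4-line data
 * phase for the read-from-cache operation. Other manufacturers are left
 * untouched here.
 */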
static int spi_nand_quad_enable(uint8_t manufacturer_id)
{
	bool enable = false;

	if (manufacturer_id != MACRONIX_ID) {
		return 0;
	}

	if (spinand_dev.spi_read_cache_op.data.buswidth ==
	    SPI_MEM_BUSWIDTH_4_LINE) {
		enable = true;
	}

	return spi_nand_update_cfg(SPI_NAND_CFG_QE,
				   enable ? SPI_NAND_CFG_QE : 0U);
}

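/*
 * Poll the status register until the busy (OIP) bit clears, bounded by
 * a 400 ms timeout. The last status value is handed back to the caller
 * so that ECC bits can be inspected after a page load.
 */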
static int spi_nand_wait_ready(uint8_t *status)
{
	int ret;
	uint64_t timeout = timeout_init_us(DELAY_US_400MS);

	while (!timeout_elapsed(timeout)) {
		ret = spi_nand_read_reg(SPI_NAND_REG_STATUS, status);
		if (ret != 0) {
			return ret;
		}

		VERBOSE("%s Status %x\n", __func__, *status);
		if ((*status & SPI_NAND_STATUS_BUSY) == 0U) {
			return 0;
		}
	}

	return -ETIMEDOUT;
}

static int spi_nand_reset(void)
{
	struct spi_mem_op op;
	uint8_t status;
	int ret;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.opcode = SPI_NAND_OP_RESET;
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	ret = spi_mem_exec_op(&op);
	if (ret != 0) {
		return ret;
	}

	return spi_nand_wait_ready(&status);
}

static int spi_nand_read_id(uint8_t *id)
{
	struct spi_mem_op op;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.opcode = SPI_NAND_OP_READ_ID;
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.data.dir = SPI_MEM_DATA_IN;
	op.data.nbytes = SPI_NAND_MAX_ID_LEN;
	op.data.buf = id;
	op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	return spi_mem_exec_op(&op);
}

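/*
 * Issue PAGE READ (13h) to load one page from the array into the device
 * cache. The 3-byte row address is assembled from a block/page split;
 * note that for page indexes below block_size the computed block_nb is 0
 * and addr.val reduces to the raw page index, which is the row address
 * that typical SPI NAND datasheets expect.
 */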
static int spi_nand_load_page(unsigned int page)
{
	struct spi_mem_op op;
	uint32_t block_nb = page / spinand_dev.nand_dev->block_size;
	uint32_t page_nb = page - (block_nb * spinand_dev.nand_dev->page_size);
	uint32_t nbpages_per_block = spinand_dev.nand_dev->block_size /
				     spinand_dev.nand_dev->page_size;
	uint32_t block_sh = __builtin_ctz(nbpages_per_block) + 1U;

	zeromem(&op, sizeof(struct spi_mem_op));
	op.cmd.opcode = SPI_NAND_OP_LOAD_PAGE;
	op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	op.addr.val = (block_nb << block_sh) | page_nb;
	op.addr.nbytes = 3U;
	op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	return spi_mem_exec_op(&op);
}

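/*
 * Stream 'len' bytes out of the device cache starting at column address
 * 'offset', using the platform-provided read-from-cache operation. On
 * two-plane devices, the plane-select bit (the column-address bit just
 * above those addressing the page) is set for odd-numbered blocks.
 */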
static int spi_nand_read_from_cache(unsigned int page, unsigned int offset,
				    uint8_t *buffer, unsigned int len)
{
	uint32_t nbpages_per_block = spinand_dev.nand_dev->block_size /
				     spinand_dev.nand_dev->page_size;
	uint32_t block_nb = page / nbpages_per_block;
	uint32_t page_sh = __builtin_ctz(spinand_dev.nand_dev->page_size) + 1U;

	spinand_dev.spi_read_cache_op.addr.val = offset;

	if ((spinand_dev.nand_dev->nb_planes > 1U) && ((block_nb % 2U) == 1U)) {
		spinand_dev.spi_read_cache_op.addr.val |= 1U << page_sh;
	}

	spinand_dev.spi_read_cache_op.data.buf = buffer;
	spinand_dev.spi_read_cache_op.data.nbytes = len;

	return spi_mem_exec_op(&spinand_dev.spi_read_cache_op);
}

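/*
 * Full page read: enable or disable on-die ECC, load the page into the
 * device cache, wait for the array read to complete, then read the data
 * out of the cache. An uncorrectable-ECC status flagged during the load
 * is reported as -EBADMSG.
 */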
static int spi_nand_read_page(unsigned int page, unsigned int offset,
			      uint8_t *buffer, unsigned int len,
			      bool ecc_enabled)
{
	uint8_t status;
	int ret;

	ret = spi_nand_ecc_enable(ecc_enabled);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_load_page(page);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_wait_ready(&status);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_from_cache(page, offset, buffer, len);
	if (ret != 0) {
		return ret;
	}

	if (ecc_enabled && ((status & SPI_NAND_STATUS_ECC_UNCOR) != 0U)) {
		return -EBADMSG;
	}

	return 0;
}

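/*
 * Factory bad-block detection: read the first two spare-area bytes of
 * the block's first page (column offset page_size) with ECC disabled;
 * any value other than 0xFF marks the block as bad.
 */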
static int spi_nand_mtd_block_is_bad(unsigned int block)
{
	unsigned int nbpages_per_block = spinand_dev.nand_dev->block_size /
					 spinand_dev.nand_dev->page_size;
	uint8_t bbm_marker[2];
	int ret;

	ret = spi_nand_read_page(block * nbpages_per_block,
				 spinand_dev.nand_dev->page_size,
				 bbm_marker, sizeof(bbm_marker), false);
	if (ret != 0) {
		return ret;
	}

	if ((bbm_marker[0] != GENMASK_32(7, 0)) ||
	    (bbm_marker[1] != GENMASK_32(7, 0))) {
		WARN("Block %u is bad\n", block);
		return 1;
	}

	return 0;
}

static int spi_nand_mtd_read_page(struct nand_device *nand, unsigned int page,
				  uintptr_t buffer)
{
	return spi_nand_read_page(page, 0, (uint8_t *)buffer,
				  spinand_dev.nand_dev->page_size, true);
}

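/*
 * Probe sequence: register the MTD callbacks and a default 1-line read
 * op, let the platform describe the device, sanity-check the geometry,
 * reset the part, read its ID, snapshot the configuration register into
 * cfg_cache and, where applicable, enable quad mode before reporting
 * size and erase size to the caller.
 */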
int spi_nand_init(unsigned long long *size, unsigned int *erase_size)
{
	uint8_t id[SPI_NAND_MAX_ID_LEN];
	int ret;

	spinand_dev.nand_dev = get_nand_device();
	if (spinand_dev.nand_dev == NULL) {
		return -EINVAL;
	}

	spinand_dev.nand_dev->mtd_block_is_bad = spi_nand_mtd_block_is_bad;
	spinand_dev.nand_dev->mtd_read_page = spi_nand_mtd_read_page;
	spinand_dev.nand_dev->nb_planes = 1;

	spinand_dev.spi_read_cache_op.cmd.opcode = SPI_NAND_OP_READ_FROM_CACHE;
	spinand_dev.spi_read_cache_op.cmd.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.addr.nbytes = 2U;
	spinand_dev.spi_read_cache_op.addr.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.dummy.nbytes = 1U;
	spinand_dev.spi_read_cache_op.dummy.buswidth = SPI_MEM_BUSWIDTH_1_LINE;
	spinand_dev.spi_read_cache_op.data.buswidth = SPI_MEM_BUSWIDTH_1_LINE;

	if (plat_get_spi_nand_data(&spinand_dev) != 0) {
		return -EINVAL;
	}

	assert((spinand_dev.nand_dev->page_size != 0U) &&
	       (spinand_dev.nand_dev->block_size != 0U) &&
	       (spinand_dev.nand_dev->size != 0U));

	ret = spi_nand_reset();
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_id(id);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_read_reg(SPI_NAND_REG_CFG, &spinand_dev.cfg_cache);
	if (ret != 0) {
		return ret;
	}

	ret = spi_nand_quad_enable(id[1]);
	if (ret != 0) {
		return ret;
	}

	VERBOSE("SPI_NAND Detected ID 0x%x\n", id[1]);

	VERBOSE("Page size %u, Block size %u, size %llu\n",
		spinand_dev.nand_dev->page_size,
		spinand_dev.nand_dev->block_size,
		spinand_dev.nand_dev->size);

	*size = spinand_dev.nand_dev->size;
	*erase_size = spinand_dev.nand_dev->block_size;

	return 0;
}
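
/*
 * Illustrative usage sketch (not part of the driver): a platform boot
 * stage would typically call spi_nand_init() once the SPI bus and the
 * spi_mem layer are set up, then read pages through the generic NAND
 * framework. INFO() comes from common/debug.h; everything else below
 * uses only symbols defined or declared by this driver.
 *
 *	unsigned long long size;
 *	unsigned int erase_size;
 *
 *	if (spi_nand_init(&size, &erase_size) == 0) {
 *		INFO("SPI NAND: %llu bytes, erase size %u bytes\n",
 *		     size, erase_size);
 *	}
 */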