/*
 * Copyright (c) 2019-2022, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/raw_nand.h>
#include <lib/utils.h>

#include <platform_def.h>

#define ONFI_SIGNATURE_ADDR	0x20U

/* CRC calculation */
#define CRC_POLYNOM		0x8005U
#define CRC_INIT_VALUE		0x4F4EU

/* Status register */
#define NAND_STATUS_READY	BIT(6)

static struct rawnand_device rawnand_dev;

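/*
 * Weak default that platforms can override to provide or adjust the
 * detected NAND device characteristics (geometry, ECC, bad block handling).
 */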
#pragma weak plat_get_raw_nand_data
int plat_get_raw_nand_data(struct rawnand_device *device)
{
	return 0;
}

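/* Issue a single command cycle, applying the requested instruction delay. */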
static int nand_send_cmd(uint8_t cmd, unsigned int tim)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_CMD | cmd;
	req.inst_delay = tim;

	return rawnand_dev.ops->exec(&req);
}

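/* Issue a single address cycle. */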
static int nand_send_addr(uint8_t addr, unsigned int tim)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_ADDR;
	req.addr = &addr;
	req.inst_delay = tim;

	return rawnand_dev.ops->exec(&req);
}

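/* Wait for the device ready/busy signal, bounded by delay milliseconds. */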
static int nand_send_wait(unsigned int delay, unsigned int tim)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_WAIT;
	req.inst_delay = tim;
	req.delay_ms = delay;

	return rawnand_dev.ops->exec(&req);
}

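/* Transfer length bytes from the device, optionally forcing 8-bit accesses. */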
static int nand_read_data(uint8_t *data, unsigned int length, bool use_8bit)
{
	struct nand_req req;

	zeromem(&req, sizeof(struct nand_req));
	req.nand = rawnand_dev.nand_dev;
	req.type = NAND_REQ_DATAIN | (use_8bit ? NAND_REQ_BUS_WIDTH_8 : 0U);
	req.addr = data;
	req.length = length;

	return rawnand_dev.ops->exec(&req);
}

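/*
 * Change Read Column: reposition the read pointer inside the page
 * currently held in the device page register, then read len bytes.
 */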
int nand_change_read_column_cmd(unsigned int offset, uintptr_t buffer,
				unsigned int len)
{
	int ret;
	uint8_t addr[2];
	unsigned int i;

	ret = nand_send_cmd(NAND_CMD_CHANGE_1ST, 0U);
	if (ret != 0) {
		return ret;
	}

	if (rawnand_dev.nand_dev->buswidth == NAND_BUS_WIDTH_16) {
		offset /= 2U;
	}

	addr[0] = offset;
	addr[1] = offset >> 8;

	for (i = 0U; i < 2U; i++) {
		ret = nand_send_addr(addr[i], 0U);
		if (ret != 0) {
			return ret;
		}
	}

	ret = nand_send_cmd(NAND_CMD_CHANGE_2ND, NAND_TCCS_MIN);
	if (ret != 0) {
		return ret;
	}

	return nand_read_data((uint8_t *)buffer, len, false);
}

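/*
 * Read Page: send two column address cycles then two row address cycles
 * (a third row cycle for devices larger than 128MB), wait for the array
 * read to complete, then transfer len bytes to buffer, if provided.
 */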
int nand_read_page_cmd(unsigned int page, unsigned int offset,
		       uintptr_t buffer, unsigned int len)
{
	uint8_t addr[5];
	uint8_t i = 0U;
	uint8_t j;
	int ret;

	VERBOSE(">%s page %u offset %u buffer 0x%lx\n", __func__, page, offset,
		buffer);

	if (rawnand_dev.nand_dev->buswidth == NAND_BUS_WIDTH_16) {
		offset /= 2U;
	}

	addr[i++] = offset;
	addr[i++] = offset >> 8;

	addr[i++] = page;
	addr[i++] = page >> 8;
	if (rawnand_dev.nand_dev->size > SZ_128M) {
		addr[i++] = page >> 16;
	}

	ret = nand_send_cmd(NAND_CMD_READ_1ST, 0U);
	if (ret != 0) {
		return ret;
	}

	for (j = 0U; j < i; j++) {
		ret = nand_send_addr(addr[j], 0U);
		if (ret != 0) {
			return ret;
		}
	}

	ret = nand_send_cmd(NAND_CMD_READ_2ND, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_wait(PSEC_TO_MSEC(NAND_TR_MAX), NAND_TRR_MIN);
	if (ret != 0) {
		return ret;
	}

	if (buffer != 0U) {
		ret = nand_read_data((uint8_t *)buffer, len, false);
	}

	return ret;
}

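/* Read Status: fetch the device status register into *status, if not NULL. */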
static int nand_status(uint8_t *status)
{
	int ret;

	ret = nand_send_cmd(NAND_CMD_STATUS, NAND_TWHR_MIN);
	if (ret != 0) {
		return ret;
	}

	if (status != NULL) {
		ret = nand_read_data(status, 1U, true);
	}

	return ret;
}

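/*
 * Poll the status register until the device reports ready, or delay_ms
 * elapses. Once ready, a READ 1st command cycle is re-issued to switch
 * the device data output back from status to page data.
 */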
int nand_wait_ready(unsigned int delay_ms)
{
	uint8_t status;
	int ret;
	uint64_t timeout;

	/* Wait before reading status */
	udelay(1);

	ret = nand_status(NULL);
	if (ret != 0) {
		return ret;
	}

	timeout = timeout_init_us(delay_ms * 1000U);
	while (!timeout_elapsed(timeout)) {
		ret = nand_read_data(&status, 1U, true);
		if (ret != 0) {
			return ret;
		}

		if ((status & NAND_STATUS_READY) != 0U) {
			return nand_send_cmd(NAND_CMD_READ_1ST, 0U);
		}

		udelay(10);
	}

	return -ETIMEDOUT;
}

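/* Reset the device and wait until it is ready again. */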
static int nand_reset(void)
{
	int ret;

	ret = nand_send_cmd(NAND_CMD_RESET, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	return nand_send_wait(PSEC_TO_MSEC(NAND_TRST_MAX), 0U);
}

#if NAND_ONFI_DETECT
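/*
 * Bit-serial CRC-16 over the parameter page bytes, using the ONFI
 * polynomial (0x8005) and initial value (0x4F4E).
 */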
static uint16_t nand_check_crc(uint16_t crc, uint8_t *data_in,
			       unsigned int data_len)
{
	uint32_t i;
	uint32_t j;
	uint32_t bit;

	for (i = 0U; i < data_len; i++) {
		uint8_t cur_param = *data_in++;

		for (j = BIT(7); j != 0U; j >>= 1) {
			bit = crc & BIT(15);
			crc <<= 1;

			if ((cur_param & j) != 0U) {
				bit ^= BIT(15);
			}

			if (bit != 0U) {
				crc ^= CRC_POLYNOM;
			}
		}

		crc &= GENMASK(15, 0);
	}

	return crc;
}

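/* Read ID at the given address (address 0x20 holds the ONFI signature). */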
static int nand_read_id(uint8_t addr, uint8_t *id, unsigned int size)
{
	int ret;

	ret = nand_send_cmd(NAND_CMD_READID, 0U);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_addr(addr, NAND_TWHR_MIN);
	if (ret != 0) {
		return ret;
	}

	return nand_read_data(id, size, true);
}

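/*
 * Read the ONFI parameter page, check its signature and CRC, then
 * record the advertised geometry (bus width, page, block and total
 * size) and minimum ECC requirement.
 */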
static int nand_read_param_page(void)
{
	struct nand_param_page page;
	uint8_t addr = 0U;
	int ret;

	ret = nand_send_cmd(NAND_CMD_READ_PARAM_PAGE, 0U);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_addr(addr, NAND_TWB_MAX);
	if (ret != 0) {
		return ret;
	}

	ret = nand_send_wait(PSEC_TO_MSEC(NAND_TR_MAX), NAND_TRR_MIN);
	if (ret != 0) {
		return ret;
	}

	ret = nand_read_data((uint8_t *)&page, sizeof(page), true);
	if (ret != 0) {
		return ret;
	}

	if (strncmp((char *)&page.page_sig, "ONFI", 4) != 0) {
		WARN("ONFI parameter page signature mismatch\n");
		return -EINVAL;
	}

	if (nand_check_crc(CRC_INIT_VALUE, (uint8_t *)&page, 254U) !=
	    page.crc16) {
		WARN("ONFI parameter page CRC error\n");
		return -EINVAL;
	}

	if ((page.features & ONFI_FEAT_BUS_WIDTH_16) != 0U) {
		rawnand_dev.nand_dev->buswidth = NAND_BUS_WIDTH_16;
	} else {
		rawnand_dev.nand_dev->buswidth = NAND_BUS_WIDTH_8;
	}

	rawnand_dev.nand_dev->block_size = page.num_pages_per_blk *
					   page.bytes_per_page;
	rawnand_dev.nand_dev->page_size = page.bytes_per_page;
	rawnand_dev.nand_dev->size = page.num_pages_per_blk *
				     page.bytes_per_page *
				     page.num_blk_in_lun * page.num_lun;

	if (page.nb_ecc_bits != GENMASK_32(7, 0)) {
		rawnand_dev.nand_dev->ecc.max_bit_corr = page.nb_ecc_bits;
		rawnand_dev.nand_dev->ecc.size = SZ_512;
	}

	VERBOSE("Page size %u, block_size %u, Size %llu, ecc %u, buswidth %u\n",
		rawnand_dev.nand_dev->page_size,
		rawnand_dev.nand_dev->block_size, rawnand_dev.nand_dev->size,
		rawnand_dev.nand_dev->ecc.max_bit_corr,
		rawnand_dev.nand_dev->buswidth);

	return 0;
}

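/*
 * Detect an ONFI-compliant device from its signature and parse its
 * parameter page.
 */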
static int detect_onfi(void)
{
	int ret;
	char id[4];

	ret = nand_read_id(ONFI_SIGNATURE_ADDR, (uint8_t *)id, sizeof(id));
	if (ret != 0) {
		return ret;
	}

	if (strncmp(id, "ONFI", sizeof(id)) != 0) {
		WARN("Non-ONFI NAND detected\n");
		return -ENODEV;
	}

	return nand_read_param_page();
}
#endif

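/*
 * A block is declared bad when the bad block marker (the first two
 * spare area bytes) of either of its first two pages is not 0xFF.
 */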
static int nand_mtd_block_is_bad(unsigned int block)
{
	unsigned int nbpages_per_block = rawnand_dev.nand_dev->block_size /
					 rawnand_dev.nand_dev->page_size;
	uint8_t bbm_marker[2];
	uint8_t page;
	int ret;

	for (page = 0U; page < 2U; page++) {
		/* Check the marker of each of the first two pages in turn */
		ret = nand_read_page_cmd((block * nbpages_per_block) + page,
					 rawnand_dev.nand_dev->page_size,
					 (uintptr_t)bbm_marker,
					 sizeof(bbm_marker));
		if (ret != 0) {
			return ret;
		}

		if ((bbm_marker[0] != GENMASK_32(7, 0)) ||
		    (bbm_marker[1] != GENMASK_32(7, 0))) {
			WARN("Block %u is bad\n", block);
			return 1;
		}
	}

	return 0;
}

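/* mtd_read_page hook: read one full page using the raw read sequence. */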
static int nand_mtd_read_page_raw(struct nand_device *nand, unsigned int page,
				  uintptr_t buffer)
{
	return nand_read_page_cmd(page, 0U, buffer,
				  rawnand_dev.nand_dev->page_size);
}

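/* Register the NAND controller operations used to execute requests. */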
void nand_raw_ctrl_init(const struct nand_ctrl_ops *ops)
{
	rawnand_dev.ops = ops;
}

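/*
 * Probe and initialize the raw NAND device: reset it, optionally run
 * ONFI detection, apply platform data, and return the total size and
 * erase (block) size.
 */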
int nand_raw_init(unsigned long long *size, unsigned int *erase_size)
{
	int ret;

	rawnand_dev.nand_dev = get_nand_device();
	if (rawnand_dev.nand_dev == NULL) {
		return -EINVAL;
	}

	rawnand_dev.nand_dev->mtd_block_is_bad = nand_mtd_block_is_bad;
	rawnand_dev.nand_dev->mtd_read_page = nand_mtd_read_page_raw;
	rawnand_dev.nand_dev->ecc.mode = NAND_ECC_NONE;

	if ((rawnand_dev.ops->setup == NULL) ||
	    (rawnand_dev.ops->exec == NULL)) {
		return -ENODEV;
	}

	ret = nand_reset();
	if (ret != 0) {
		return ret;
	}

#if NAND_ONFI_DETECT
	if (detect_onfi() != 0) {
		WARN("ONFI detection failed\n");
	}
#endif

	if (plat_get_raw_nand_data(&rawnand_dev) != 0) {
		return -EINVAL;
	}

	assert((rawnand_dev.nand_dev->page_size != 0U) &&
	       (rawnand_dev.nand_dev->block_size != 0U) &&
	       (rawnand_dev.nand_dev->size != 0U));

	*size = rawnand_dev.nand_dev->size;
	*erase_size = rawnand_dev.nand_dev->block_size;

	rawnand_dev.ops->setup(rawnand_dev.nand_dev);

	return 0;
}