// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2014 Gateworks Corporation
 * Copyright 2019 NXP
 * Author: Tim Harvey <tharvey@gateworks.com>
 */
#include <common.h>
#include <log.h>
#include <nand.h>
#include <malloc.h>
#include <mxs_nand.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/mtd/rawnand.h>

static struct mtd_info *mtd;
static struct nand_chip nand_chip;

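/*
 * Minimal cmdfunc for the SPL: issue the command byte, clock out the
 * optional column/row address cycles, send the second-cycle opcode for
 * READ0/RNDOUT, then poll dev_ready() with a ~20 ms timeout.
 */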
static void mxs_nand_command(struct mtd_info *mtd, unsigned int command,
			     int column, int page_addr)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	u32 timeo, time_start;

	/* write out the command to the device */
	chip->cmd_ctrl(mtd, command, NAND_CLE);

	/* Serially input address */
	if (column != -1) {
		/* Adjust columns for 16 bit buswidth */
		if (chip->options & NAND_BUSWIDTH_16 &&
		    !nand_opcode_8bits(command))
			column >>= 1;
		chip->cmd_ctrl(mtd, column, NAND_ALE);

		/*
		 * Assume LP NAND here, so use two bytes column address
		 * but not for CMD_READID and CMD_PARAM, which require
		 * only one byte column address
		 */
		if (command != NAND_CMD_READID &&
		    command != NAND_CMD_PARAM)
			chip->cmd_ctrl(mtd, column >> 8, NAND_ALE);
	}
	if (page_addr != -1) {
		chip->cmd_ctrl(mtd, page_addr, NAND_ALE);
		chip->cmd_ctrl(mtd, page_addr >> 8, NAND_ALE);
		/* One more address cycle for devices > 128MiB */
		if (chip->chipsize > (128 << 20))
			chip->cmd_ctrl(mtd, page_addr >> 16, NAND_ALE);
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0);

	if (command == NAND_CMD_READ0) {
		chip->cmd_ctrl(mtd, NAND_CMD_READSTART, NAND_CLE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0);
	} else if (command == NAND_CMD_RNDOUT) {
		/* No ready / busy check necessary */
		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
			       NAND_NCE | NAND_CLE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE);
	}

	/* wait for nand ready */
	ndelay(100);
	timeo = (CONFIG_SYS_HZ * 20) / 1000;
	time_start = get_timer(0);
	while (get_timer(time_start) < timeo) {
		if (chip->dev_ready(mtd))
			break;
	}
}

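/*
 * Flash identification: with CONFIG_SPL_NAND_IDENT the full ID-table/ONFI
 * detection from the raw NAND core is used; otherwise the small ONFI-only
 * probe below keeps the SPL footprint down.
 */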
#if defined(CONFIG_SPL_NAND_IDENT)

/* Trying to detect the NAND flash using ONFi, JEDEC, and (extended) IDs */
static int mxs_flash_full_ident(struct mtd_info *mtd)
{
	int nand_maf_id, nand_dev_id;
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_flash_dev *type;

	type = nand_get_flash_type(mtd, chip, &nand_maf_id, &nand_dev_id, NULL);

	if (IS_ERR(type)) {
		chip->select_chip(mtd, -1);
		return PTR_ERR(type);
	}

	return 0;
}

#else

/* Trying to detect the NAND flash using ONFi only */
static int mxs_flash_onfi_ident(struct mtd_info *mtd)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	int i;
	u8 mfg_id, dev_id;
	u8 id_data[8];
	struct nand_onfi_params *p = &chip->onfi_params;

	/* Reset the chip */
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);

	/* Send the command for reading device ID */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

	/* Read manufacturer and device IDs */
	mfg_id = chip->read_byte(mtd);
	dev_id = chip->read_byte(mtd);

	/* Try again to make sure */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
	for (i = 0; i < 8; i++)
		id_data[i] = chip->read_byte(mtd);
	if (id_data[0] != mfg_id || id_data[1] != dev_id) {
		printf("second ID read did not match\n");
		return -1;
	}
	debug("0x%02x:0x%02x ", mfg_id, dev_id);

	/* read the ONFI signature */
	chip->onfi_version = 0;
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
	    chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') {
		return -2;
	}

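	/*
	 * The parameter page read below supplies the page, block and chip
	 * geometry the loader needs; the SPL derives it directly from the
	 * ONFI fields instead of running a full nand_scan().
	 */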
	/* we have ONFI, probe it */
	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
	chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
	mtd->name = p->model;
	mtd->writesize = le32_to_cpu(p->byte_per_page);
	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
	chip->chipsize = le32_to_cpu(p->blocks_per_lun);
	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
	/* Convert chipsize to number of pages per chip - 1 */
	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
	chip->badblockbits = 8;

	debug("erasesize=%d (>>%d)\n", mtd->erasesize, chip->phys_erase_shift);
	debug("writesize=%d (>>%d)\n", mtd->writesize, chip->page_shift);
	debug("oobsize=%d\n", mtd->oobsize);
	debug("chipsize=%lld\n", chip->chipsize);

	return 0;
}

#endif /* CONFIG_SPL_NAND_IDENT */

static int mxs_flash_ident(struct mtd_info *mtd)
{
	int ret;
#if defined(CONFIG_SPL_NAND_IDENT)
	ret = mxs_flash_full_ident(mtd);
#else
	ret = mxs_flash_onfi_ident(mtd);
#endif
	return ret;
}

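/*
 * Read one full page into @buf through the ecc.read_page() hook provided
 * by the mxs_nand driver; returns 0 on success, -1 on a read/ECC failure.
 */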
static int mxs_read_page_ecc(struct mtd_info *mtd, void *buf, unsigned int page)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, page);
	ret = nand_chip.ecc.read_page(mtd, chip, buf, 1, page);
	if (ret < 0) {
		printf("read_page failed %d\n", ret);
		return -1;
	}
	return 0;
}

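/*
 * Bad block check: read the OOB area of the page at @offs and treat a
 * first spare byte other than 0xff as a bad block marker.  Callers pass
 * block-aligned offsets, so this checks the first page of each block.
 */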
static int is_badblock(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	unsigned int block = offs >> chip->phys_erase_shift;
	unsigned int page = offs >> chip->page_shift;

	debug("%s offs=0x%08x block:%d page:%d\n", __func__, (int)offs, block,
	      page);
	chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
	memset(chip->oob_poi, 0, mtd->oobsize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	return chip->oob_poi[0] != 0xff;
}

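/*
 * Called by the SPL NAND loader; safe to call more than once.  If
 * identification fails, numchips stays 0 so nand_spl_load_image() later
 * bails out with -ENODEV instead of reading from an unknown device.
 */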
/* setup mtd and nand structs and init mxs_nand driver */
void nand_init(void)
{
	/* return if already initialized */
	if (nand_chip.numchips)
		return;

	/* init mxs nand driver */
	mxs_nand_init_spl(&nand_chip);
	mtd = nand_to_mtd(&nand_chip);
	/* set mtd functions */
	nand_chip.cmdfunc = mxs_nand_command;
	nand_chip.scan_bbt = nand_default_bbt;
	nand_chip.numchips = 1;

	/* identify flash device */
	if (mxs_flash_ident(mtd)) {
		printf("Failed to identify\n");
		nand_chip.numchips = 0; /* If fail, don't use nand */
		return;
	}

	/* allocate and initialize buffers */
	nand_chip.buffers = memalign(ARCH_DMA_MINALIGN,
				     sizeof(*nand_chip.buffers));
	nand_chip.oob_poi = nand_chip.buffers->databuf + mtd->writesize;
	/* setup flash layout (does not scan as we override that) */
	mtd->size = nand_chip.chipsize;
	nand_chip.scan_bbt(mtd);
	mxs_nand_setup_ecc(mtd);
}

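/*
 * Load @size bytes starting at flash offset @offs into @dst, reading page
 * by page through the ECC path and skipping bad blocks (each bad block
 * pushes the last block of interest out by one).
 */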
int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
{
	unsigned int sz;
	unsigned int block, lastblock;
	unsigned int page, page_offset;
	unsigned int nand_page_per_block;
	struct nand_chip *chip;
	u8 *page_buf = NULL;

	chip = mtd_to_nand(mtd);
	if (!chip->numchips)
		return -ENODEV;

	page_buf = malloc(mtd->writesize);
	if (!page_buf)
		return -ENOMEM;

	/* offs has to be aligned to a page address! */
	block = offs / mtd->erasesize;
	lastblock = (offs + size - 1) / mtd->erasesize;
	page = (offs % mtd->erasesize) / mtd->writesize;
	page_offset = offs % mtd->writesize;
	nand_page_per_block = mtd->erasesize / mtd->writesize;

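	/*
	 * Walk the block range: copy the needed pages out of every good
	 * block, and for every bad block extend the range by one and move on.
	 */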
	while (block <= lastblock && size > 0) {
		if (!is_badblock(mtd, mtd->erasesize * block, 1)) {
			while (page < nand_page_per_block) {
				int curr_page = nand_page_per_block * block + page;

				if (mxs_read_page_ecc(mtd, page_buf, curr_page) < 0) {
					free(page_buf);
					return -EIO;
				}

				if (size > (mtd->writesize - page_offset))
					sz = (mtd->writesize - page_offset);
				else
					sz = size;

				memcpy(dst, page_buf + page_offset, sz);
				dst += sz;
				size -= sz;
				page_offset = 0;
				page++;
			}

			page = 0;
		} else {
			/* Skip bad blocks */
			lastblock++;
		}

		block++;
	}

	free(page_buf);

	return 0;
}

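/*
 * Stubs expected by the SPL NAND code: no bad block table is built (blocks
 * are checked on the fly in is_badblock()) and chip deselect is a no-op.
 */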
int nand_default_bbt(struct mtd_info *mtd)
{
	return 0;
}

void nand_deselect(void)
{
}

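/*
 * Convert a logical offset @offs relative to @sector into a physical
 * offset by adding one erase block for every bad block found in the range.
 */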
u32 nand_spl_adjust_offset(u32 sector, u32 offs)
{
	unsigned int block, lastblock;

	block = sector / mtd->erasesize;
	lastblock = (sector + offs) / mtd->erasesize;

	while (block <= lastblock) {
		if (is_badblock(mtd, block * mtd->erasesize, 1)) {
			offs += mtd->erasesize;
			lastblock++;
		}

		block++;
	}

	return offs;
}