blob: 5b189a1d8a595e79f95a395bb7507af3fbd430e8 [file] [log] [blame]
/*
 * (C) Copyright 2012
 * Konstantin Kozhevnikov, Cogent Embedded
 *
 * based on nand_spl_simple code
 *
 * (C) Copyright 2006-2008
 * Stefan Roese, DENX Software Engineering, sr@denx.de.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
12
13#include <common.h>
14#include <nand.h>
15#include <asm/io.h>
16#include <linux/mtd/nand_ecc.h>
17
18static int nand_ecc_pos[] = CONFIG_SYS_NAND_ECCPOS;
Scott Wood2c1b7e12016-05-30 13:57:55 -050019static struct mtd_info *mtd;
Ilya Yanok15d67a52012-11-06 13:06:34 +000020static struct nand_chip nand_chip;
21
22#define ECCSTEPS (CONFIG_SYS_NAND_PAGE_SIZE / \
23 CONFIG_SYS_NAND_ECCSIZE)
24#define ECCTOTAL (ECCSTEPS * CONFIG_SYS_NAND_ECCBYTES)
25
26
27/*
28 * NAND command for large page NAND devices (2k)
29 */
/*
 * nand_command() - issue one command, plus the required address cycles,
 * to a large page (2k) NAND device via the chip's raw callbacks.
 *
 * @block:	erase-block index (folded into the row address)
 * @page:	page index within @block
 * @offs:	byte offset within the page (column address)
 * @cmd:	NAND_CMD_* opcode to issue
 *
 * Drives the bus directly through this->cmd_ctrl (CLE/ALE cycles) and
 * polls this->dev_ready for completion; the exact cycle order below
 * matches the ONFI large-page command protocol and must not be changed.
 *
 * Always returns 0.
 */
static int nand_command(int block, int page, uint32_t offs,
	u8 cmd)
{
	struct nand_chip *this = mtd_to_nand(mtd);
	int page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT;
	void (*hwctrl)(struct mtd_info *mtd, int cmd,
			unsigned int ctrl) = this->cmd_ctrl;

	/* Busy-wait until any previous operation has finished. */
	while (!this->dev_ready(mtd))
		;

	/*
	 * Emulate NAND_CMD_READOOB: on large-page devices the OOB area
	 * is addressed as the tail of the page via a plain READ0.
	 */
	if (cmd == NAND_CMD_READOOB) {
		offs += CONFIG_SYS_NAND_PAGE_SIZE;
		cmd = NAND_CMD_READ0;
	}

	/* Begin command latch cycle */
	hwctrl(mtd, cmd, NAND_CTRL_CLE | NAND_CTRL_CHANGE);

	if (cmd == NAND_CMD_RESET) {
		/* RESET takes no address cycles; just wait it out. */
		hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

		/*
		 * Apply this short delay always to ensure that we do wait
		 * tWB in any case on any machine.
		 */
		ndelay(150);

		while (!this->dev_ready(mtd))
			;
		return 0;
	}

	/* Shift the offset from byte addressing to word addressing. */
	if ((this->options & NAND_BUSWIDTH_16) && !nand_opcode_8bits(cmd))
		offs >>= 1;

	/* Set ALE and clear CLE to start address cycle */
	/* Column address */
	hwctrl(mtd, offs & 0xff,
	       NAND_CTRL_ALE | NAND_CTRL_CHANGE); /* A[7:0] */
	hwctrl(mtd, (offs >> 8) & 0xff, NAND_CTRL_ALE); /* A[11:9] */
	/* Row address (RNDOUT only moves the column pointer, so skip it) */
	if (cmd != NAND_CMD_RNDOUT) {
		hwctrl(mtd, (page_addr & 0xff),
		       NAND_CTRL_ALE); /* A[19:12] */
		hwctrl(mtd, ((page_addr >> 8) & 0xff),
		       NAND_CTRL_ALE); /* A[27:20] */
#ifdef CONFIG_SYS_NAND_5_ADDR_CYCLE
		/* One more address cycle for devices > 128MiB */
		hwctrl(mtd, (page_addr >> 16) & 0x0f,
		       NAND_CTRL_ALE); /* A[31:28] */
#endif
	}

	hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);


	/*
	 * Program and erase have their own busy handlers status, sequential
	 * in and status need no delay.
	 */
	switch (cmd) {
	case NAND_CMD_CACHEDPROG:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_RNDIN:
	case NAND_CMD_STATUS:
		return 0;

	case NAND_CMD_RNDOUT:
		/* No ready / busy check necessary */
		hwctrl(mtd, NAND_CMD_RNDOUTSTART, NAND_CTRL_CLE |
		       NAND_CTRL_CHANGE);
		hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
		return 0;

	case NAND_CMD_READ0:
		/* Latch in address */
		hwctrl(mtd, NAND_CMD_READSTART,
		       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
		hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
	}

	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(150);

	while (!this->dev_ready(mtd))
		;

	return 0;
}
128
129static int nand_is_bad_block(int block)
130{
Scott Wood17fed142016-05-30 13:57:56 -0500131 struct nand_chip *this = mtd_to_nand(mtd);
Ilya Yanok15d67a52012-11-06 13:06:34 +0000132
133 nand_command(block, 0, CONFIG_SYS_NAND_BAD_BLOCK_POS,
134 NAND_CMD_READOOB);
135
136 /*
137 * Read one byte (or two if it's a 16 bit chip).
138 */
139 if (this->options & NAND_BUSWIDTH_16) {
140 if (readw(this->IO_ADDR_R) != 0xffff)
141 return 1;
142 } else {
143 if (readb(this->IO_ADDR_R) != 0xff)
144 return 1;
145 }
146
147 return 0;
148}
149
/*
 * nand_read_page() - read one full page into @dst with hardware ECC.
 *
 * @block:	erase-block index
 * @page:	page index within @block
 * @dst:	destination buffer, at least CONFIG_SYS_NAND_PAGE_SIZE bytes
 *
 * For each ECC step the hardware ECC engine is armed (ecc.hwctl), the
 * data chunk is read, then a random-output command jumps to the OOB
 * area to fetch the stored ECC bytes. Correction happens in a second
 * pass once all data and ECC bytes are in.
 *
 * Always returns 0 — the result of ecc.correct() is deliberately
 * ignored, so uncorrectable errors are not reported to the caller.
 */
static int nand_read_page(int block, int page, void *dst)
{
	struct nand_chip *this = mtd_to_nand(mtd);
	u_char ecc_calc[ECCTOTAL];
	u_char ecc_code[ECCTOTAL];
	u_char oob_data[CONFIG_SYS_NAND_OOBSIZE];
	int i;
	int eccsize = CONFIG_SYS_NAND_ECCSIZE;
	int eccbytes = CONFIG_SYS_NAND_ECCBYTES;
	int eccsteps = ECCSTEPS;
	uint8_t *p = dst;
	uint32_t data_pos = 0;
	/* First ECC byte lives at nand_ecc_pos[0] within the OOB area */
	uint8_t *oob = &oob_data[0] + nand_ecc_pos[0];
	uint32_t oob_pos = eccsize * eccsteps + nand_ecc_pos[0];

	nand_command(block, page, 0, NAND_CMD_READ0);

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine before each data chunk is read. */
		this->ecc.hwctl(mtd, NAND_ECC_READ);
		nand_command(block, page, data_pos, NAND_CMD_RNDOUT);

		this->read_buf(mtd, p, eccsize);

		/* Jump to the matching ECC bytes in the OOB area. */
		nand_command(block, page, oob_pos, NAND_CMD_RNDOUT);

		this->read_buf(mtd, oob, eccbytes);
		this->ecc.calculate(mtd, p, &ecc_calc[i]);

		data_pos += eccsize;
		oob_pos += eccbytes;
		oob += eccbytes;
	}

	/* Pick the ECC bytes out of the oob data */
	for (i = 0; i < ECCTOTAL; i++)
		ecc_code[i] = oob_data[nand_ecc_pos[i]];

	eccsteps = ECCSTEPS;
	p = dst;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* No chance to do something with the possible error message
		 * from correct_data(). We just hope that all possible errors
		 * are corrected by this routine.
		 */
		this->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
	}

	return 0;
}
200
201int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
202{
203 unsigned int block, lastblock;
Lokesh Vutlabc40d992016-05-24 10:34:41 +0530204 unsigned int page, page_offset;
Ilya Yanok15d67a52012-11-06 13:06:34 +0000205
206 /*
207 * offs has to be aligned to a page address!
208 */
209 block = offs / CONFIG_SYS_NAND_BLOCK_SIZE;
210 lastblock = (offs + size - 1) / CONFIG_SYS_NAND_BLOCK_SIZE;
211 page = (offs % CONFIG_SYS_NAND_BLOCK_SIZE) / CONFIG_SYS_NAND_PAGE_SIZE;
Lokesh Vutlabc40d992016-05-24 10:34:41 +0530212 page_offset = offs % CONFIG_SYS_NAND_PAGE_SIZE;
Ilya Yanok15d67a52012-11-06 13:06:34 +0000213
214 while (block <= lastblock) {
215 if (!nand_is_bad_block(block)) {
216 /*
217 * Skip bad blocks
218 */
219 while (page < CONFIG_SYS_NAND_PAGE_COUNT) {
220 nand_read_page(block, page, dst);
Lokesh Vutlabc40d992016-05-24 10:34:41 +0530221 /*
222 * When offs is not aligned to page address the
223 * extra offset is copied to dst as well. Copy
224 * the image such that its first byte will be
225 * at the dst.
226 */
227 if (unlikely(page_offset)) {
228 memmove(dst, dst + page_offset,
229 CONFIG_SYS_NAND_PAGE_SIZE);
230 dst = (void *)((int)dst - page_offset);
231 page_offset = 0;
232 }
Ilya Yanok15d67a52012-11-06 13:06:34 +0000233 dst += CONFIG_SYS_NAND_PAGE_SIZE;
234 page++;
235 }
236
237 page = 0;
238 } else {
239 lastblock++;
240 }
241
242 block++;
243 }
244
245 return 0;
246}
247
248/* nand_init() - initialize data to make nand usable by SPL */
249void nand_init(void)
250{
251 /*
252 * Init board specific nand support
253 */
Boris Brezillon3b5f8842016-06-15 20:56:10 +0200254 mtd = nand_to_mtd(&nand_chip);
Ilya Yanok15d67a52012-11-06 13:06:34 +0000255 nand_chip.IO_ADDR_R = nand_chip.IO_ADDR_W =
256 (void __iomem *)CONFIG_SYS_NAND_BASE;
257 board_nand_init(&nand_chip);
258
259 if (nand_chip.select_chip)
Scott Wood2c1b7e12016-05-30 13:57:55 -0500260 nand_chip.select_chip(mtd, 0);
Ilya Yanok15d67a52012-11-06 13:06:34 +0000261
262 /* NAND chip may require reset after power-on */
263 nand_command(0, 0, 0, NAND_CMD_RESET);
264}
265
266/* Unselect after operation */
267void nand_deselect(void)
268{
269 if (nand_chip.select_chip)
Scott Wood2c1b7e12016-05-30 13:57:55 -0500270 nand_chip.select_chip(mtd, -1);
Ilya Yanok15d67a52012-11-06 13:06:34 +0000271}