// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/mtd/nand/raw/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 */

#include <common.h>
#include <malloc.h>
#include <fdtdec.h>
#include <nand.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/types.h>

#include "pxa3xx_nand.h"

DECLARE_GLOBAL_DATA_PTR;

#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define CHIP_DELAY_TIMEOUT	200
#define NAND_STOP_DELAY		40

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	int			cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			force_raw;	/* prevent use_ecc from being set */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size are the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static struct pxa3xx_nand_timing timing[] = {
	/*
	 * tCH	Enable signal hold time
	 * tCS	Enable signal setup time
	 * tWH	ND_nWE high duration
	 * tWP	ND_nWE pulse time
	 * tRH	ND_nRE high duration
	 * tRP	ND_nRE pulse width
	 * tR	ND_nWE high to ND_nRE low for read
	 * tWHR	ND_nWE high to ND_nRE low for status read
	 * tAR	ND_ALE low to ND_nRE low delay
	 */
	/*ch  cs  wh   wp  rh   rp   r      whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	/*
	 * chip_id
	 * flash_width	Width of Flash memory (DWIDTH_M)
	 * dfc_width	Width of flash controller(DWIDTH_C)
	 * *timing
	 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
	 */
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
	{ 0xda98,  8,  8, &timing[4] },
};

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		64, 65, 66, 67, 68, 69, 70, 71,
		72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87,
		88, 89, 90, 91, 92, 93, 94, 95},
	.oobfree = { {1, 4}, {6, 26} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,

		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127,

		160, 161, 162, 163, 164, 165, 166, 167,
		168, 169, 170, 171, 172, 173, 174, 175,
		176, 177, 178, 179, 180, 181, 182, 183,
		184, 185, 186, 187, 188, 189, 190, 191,

		224, 225, 226, 227, 228, 229, 230, 231,
		232, 233, 234, 235, 236, 237, 238, 239,
		240, 241, 242, 243, 244, 245, 246, 247,
		248, 249, 250, 251, 252, 253, 254, 255},

	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
	.eccbytes = 256,
	.eccpos = {},
	/* HW ECC handles all ECC data and all spare area is free for OOB */
	.oobfree = {{0, 160} }
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)
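
/*
 * Each timing value is clamped to the width of its register field:
 * 3 bits for the NDTR0 fields, 16 bits for tR and 4 bits for
 * tWHR/tAR, which is what the min() bounds above enforce.
 */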

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
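/*
 * Illustrative example (assuming a 250 MHz controller clock):
 * ns2cycle(40, 250000000) = 40 * 250 / 1000 = 10 cycles.
 */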

static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

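	/*
	 * struct nand_sdr_timings values are expressed in picoseconds;
	 * round them up to nanoseconds before converting to clock cycles.
	 */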
	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(mtd);
		id |= chip->read_byte(mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(&info->pdev->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}

/**
 * NOTE: ND_RUN must be set first, and the command buffer
 * written afterwards, otherwise it does not work.
 * We enable all the interrupts at the same time, and
 * let pxa3xx_nand_irq() handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch && !info->force_raw) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read we have
		 * to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	int data_len = info->step_chunk_size;

	/*
	 * In raw mode, include the spare area and the ECC bytes that are not
	 * consumed by the controller in the data section. Do not reorganize
	 * here, do it in the ->read_page_raw() handler instead.
	 */
	if (info->force_raw)
		data_len += info->step_spare_size + info->ecc_size;

	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(data_len, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (info->step_chunk_size)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(data_len, 4));

		if (info->force_raw)
			break;

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += data_len;
	info->oob_buff_pos += info->step_spare_size;
}

static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}

static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	/* TODO - find out why we need the delay during write operation. */
	ndelay(1);

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ.
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes
		 * directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < info->chunk_size) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset the data and oob column pointers used to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->step_chunk_size	= 0;
	info->step_spare_size	= 0;
	info->cur_chunk		= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
		if (!info->force_raw)
			info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (info->force_raw) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
				       NDCB0_LEN_OVRD |
				       NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size + info->ecc_size;
		} else if (mtd->writesize == info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > info->chunk_size) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * If this is an x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if so,
	 * reload the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * If this is an x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if so,
	 * reload the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0 &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int bf;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
		/*
		 * Empty pages will trigger uncorrectable errors. Re-read the
		 * entire page in raw mode and check for bits not being "1".
		 * If there are more than the supported strength, then it means
		 * this is an actual uncorrectable error.
		 */
		chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
		bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 NULL, 0, chip->ecc.strength);
		if (bf < 0) {
			mtd->ecc_stats.failed++;
		} else if (bf) {
			mtd->ecc_stats.corrected += bf;
			info->max_bitflips = max_t(unsigned int,
						   info->max_bitflips, bf);
			info->retcode = ERR_CORERR;
		} else {
			info->retcode = ERR_NONE;
		}

	} else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
		/* Raw read is not supported with Hamming ECC engine */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, uint8_t *buf,
				     int oob_required, int page)
{
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int chunk, ecc_off_buf;

	if (!info->ecc_bch)
		return -ENOTSUPP;

	/*
	 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
	 * pxa3xx_nand_start(), which will actually disable the ECC engine.
	 */
	info->force_raw = true;
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

	ecc_off_buf = (info->nfullchunks * info->spare_size) +
		      info->last_spare_size;
	for (chunk = 0; chunk < info->nfullchunks; chunk++) {
		chip->read_buf(mtd,
			       buf + (chunk * info->chunk_size),
			       info->chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (chunk * (info->spare_size)),
			       info->spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (chunk * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	if (info->ntotalchunks > info->nfullchunks) {
		chip->read_buf(mtd,
			       buf + (info->nfullchunks * info->chunk_size),
			       info->last_chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (info->nfullchunks * (info->spare_size)),
			       info->last_spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (info->nfullchunks * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	info->force_raw = false;

	return 0;
}

static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
				    struct nand_chip *chip, int page)
{
	/* Invalidate page cache */
	chip->pagebuf = -1;

	return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
				       page);
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has a new command just been sent? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
				  const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata = info->pdata;

	/* Configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	return 0;
}

static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
	struct nand_chip *chip = mtd_to_nand(mtd);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}
1463
Ofer Heifetz268979f2018-08-29 11:56:08 +03001464static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001465{
Ofer Heifetz531816e2018-08-29 11:56:07 +03001466 struct pxa3xx_nand_platform_data *pdata = info->pdata;
Stefan Roese75659da2015-07-23 10:26:16 +02001467 uint32_t ndcr = nand_readl(info, NDCR);
1468
Stefan Roese75659da2015-07-23 10:26:16 +02001469 /* Set an initial chunk size */
Ofer Heifetz4a574aa2018-08-29 11:56:05 +03001470 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
Ofer Heifetz531816e2018-08-29 11:56:07 +03001471 info->reg_ndcr = ndcr &
1472 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1473 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001474 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1475 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
Stefan Roese75659da2015-07-23 10:26:16 +02001476}
1477
1478static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1479{
1480 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1481 if (info->data_buff == NULL)
1482 return -ENOMEM;
1483 return 0;
1484}
1485
1486static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1487{
1488 struct pxa3xx_nand_info *info = host->info_data;
1489 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1490 struct mtd_info *mtd;
1491 struct nand_chip *chip;
1492 const struct nand_sdr_timings *timings;
1493 int ret;
1494
Ofer Heifetz0da35df2018-08-29 11:56:01 +03001495 mtd = nand_to_mtd(&info->host[info->cs]->chip);
Scott Wood17fed142016-05-30 13:57:56 -05001496 chip = mtd_to_nand(mtd);
Stefan Roese75659da2015-07-23 10:26:16 +02001497
1498 /* configure default flash values */
1499 info->reg_ndcr = 0x0; /* enable all interrupts */
1500 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
Ofer Heifetz4a574aa2018-08-29 11:56:05 +03001501 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
Stefan Roese75659da2015-07-23 10:26:16 +02001502 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1503
1504 /* use the common timing to make a try */
1505 timings = onfi_async_timing_mode_to_sdr_timings(0);
1506 if (IS_ERR(timings))
1507 return PTR_ERR(timings);
1508
1509 pxa3xx_nand_set_sdr_timing(host, timings);
1510
1511 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1512 ret = chip->waitfunc(mtd, chip);
1513 if (ret & NAND_STATUS_FAIL)
1514 return -ENODEV;
1515
1516 return 0;
1517}
1518
1519static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1520 struct nand_ecc_ctrl *ecc,
1521 int strength, int ecc_stepsize, int page_size)
1522{
1523 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001524 info->nfullchunks = 1;
1525 info->ntotalchunks = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001526 info->chunk_size = 2048;
1527 info->spare_size = 40;
1528 info->ecc_size = 24;
1529 ecc->mode = NAND_ECC_HW;
1530 ecc->size = 512;
1531 ecc->strength = 1;
1532
1533 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001534 info->nfullchunks = 1;
1535 info->ntotalchunks = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001536 info->chunk_size = 512;
1537 info->spare_size = 8;
1538 info->ecc_size = 8;
1539 ecc->mode = NAND_ECC_HW;
1540 ecc->size = 512;
1541 ecc->strength = 1;
1542
1543 /*
1544 * Required ECC: 4-bit correction per 512 bytes
1545 * Select: 16-bit correction per 2048 bytes
1546 */
1547 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1548 info->ecc_bch = 1;
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001549 info->nfullchunks = 1;
1550 info->ntotalchunks = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001551 info->chunk_size = 2048;
1552 info->spare_size = 32;
1553 info->ecc_size = 32;
1554 ecc->mode = NAND_ECC_HW;
1555 ecc->size = info->chunk_size;
1556 ecc->layout = &ecc_layout_2KB_bch4bit;
1557 ecc->strength = 16;
1558
1559 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1560 info->ecc_bch = 1;
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001561 info->nfullchunks = 2;
1562 info->ntotalchunks = 2;
Stefan Roese75659da2015-07-23 10:26:16 +02001563 info->chunk_size = 2048;
1564 info->spare_size = 32;
1565 info->ecc_size = 32;
1566 ecc->mode = NAND_ECC_HW;
1567 ecc->size = info->chunk_size;
1568 ecc->layout = &ecc_layout_4KB_bch4bit;
1569 ecc->strength = 16;
1570
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001571 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1572 info->ecc_bch = 1;
1573 info->nfullchunks = 4;
1574 info->ntotalchunks = 4;
1575 info->chunk_size = 2048;
1576 info->spare_size = 32;
1577 info->ecc_size = 32;
1578 ecc->mode = NAND_ECC_HW;
1579 ecc->size = info->chunk_size;
1580 ecc->layout = &ecc_layout_8KB_bch4bit;
1581 ecc->strength = 16;
1582
Stefan Roese75659da2015-07-23 10:26:16 +02001583 /*
1584 * Required ECC: 8-bit correction per 512 bytes
1585 * Select: 16-bit correction per 1024 bytes
1586 */
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001587 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1588 info->ecc_bch = 1;
1589 info->nfullchunks = 1;
1590 info->ntotalchunks = 2;
1591 info->chunk_size = 1024;
1592 info->spare_size = 0;
1593 info->last_chunk_size = 1024;
Miquel Raynal53e9c122018-10-11 17:45:44 +02001594 info->last_spare_size = 32;
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001595 info->ecc_size = 32;
1596 ecc->mode = NAND_ECC_HW;
1597 ecc->size = info->chunk_size;
1598 ecc->layout = &ecc_layout_2KB_bch8bit;
1599 ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 8;
		info->ntotalchunks = 9;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 160;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch8bit;
		ecc->strength = 16;
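		/*
		 * Here last_chunk_size is 0: the ninth chunk carries no data,
		 * only the remaining 160 B of spare area, so ntotalchunks is
		 * nfullchunks + 1 (8 data chunks of 1024 B plus one spare-only
		 * chunk). The 4 KiB case above follows the same pattern.
		 */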

	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}

static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
		ret = pxa3xx_nand_sensing(host);
		if (ret) {
			dev_info(&info->pdev->dev,
				 "There is no chip on cs %d!\n",
				 info->cs);
			return ret;
		}
	}
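	/*
	 * From here on the controller setup is known: with keep_config it is
	 * read back from the registers (presumably programmed by an earlier
	 * boot stage), otherwise it was derived by the ident/sensing calls
	 * above.
	 */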

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(&info->pdev->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}
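	/*
	 * Example of the fallback above: a chip without ONFI (or full-id)
	 * data reports ecc_strength_ds == 0 and ecc_step_ds == 0, so the
	 * driver falls back to 1 bit per 512 bytes, i.e. the non-BCH cases
	 * handled at the top of pxa_ecc_init().
	 */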

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > info->chunk_size) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}
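	/*
	 * Example: an 8 KiB page split into 2 KiB chunks is transferred as
	 * four FIFO-sized reads/writes, which only the
	 * PXA3XX_NAND_VARIANT_ARMADA370 flavour of the controller can do,
	 * via the extended (split) command handling selected above.
	 */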

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
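	/*
	 * Worked example: a 2 KiB page needs two column address cycles,
	 * and a device with more than 65536 pages (e.g. larger than 128 MiB
	 * at 2 KiB per page) needs three row address cycles; smaller devices
	 * get by with two.
	 */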

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}

static int alloc_nand_resource(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	int ret, cs;

	pdata = info->pdata;
	if (pdata->num_cs <= 0)
		return -ENODEV;

	info->variant = pxa3xx_nand_get_variant();
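	/*
	 * The per-CS host structures live directly behind the info structure
	 * (see the kzalloc() in board_nand_init()), and struct nand_chip is
	 * expected to be the first member of struct pxa3xx_nand_host, which
	 * is what makes the pointer casts in the loop below valid.
	 */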
	for (cs = 0; cs < pdata->num_cs; cs++) {
		chip = (struct nand_chip *)
			((u8 *)&info[1] + sizeof(*host) * cs);
		mtd = nand_to_mtd(chip);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
		mtd->owner = THIS_MODULE;

		nand_set_controller_data(chip, host);
		chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
		chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
		chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
		chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
		chip->controller = &info->controller;
		chip->waitfunc = pxa3xx_nand_waitfunc;
		chip->select_chip = pxa3xx_nand_select_chip;
		chip->read_word = pxa3xx_nand_read_word;
		chip->read_byte = pxa3xx_nand_read_byte;
		chip->read_buf = pxa3xx_nand_read_buf;
		chip->write_buf = pxa3xx_nand_write_buf;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc = nand_cmdfunc;
	}

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	return 0;

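	/*
	 * Note: the kfree() below is currently unreachable; the only error
	 * path after the allocation jumps straight to fail_disable_clk.
	 */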
	kfree(info->data_buff);
fail_disable_clk:
	return ret;
}

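/*
 * The parser below expects a device tree node roughly like the following
 * sketch (illustrative values only; node name, register address and size
 * depend on the SoC):
 *
 *	nand-controller {
 *		compatible = "marvell,mvebu-pxa3xx-nand";
 *		reg = <0x00d00000 0x100>;
 *		num-cs = <1>;
 *		nand-keep-config;
 *		nand-enable-arbiter;
 *		nand-ecc-strength = <4>;
 *		nand-ecc-step-size = <512>;
 *	};
 */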
static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	const void *blob = gd->fdt_blob;
	int node = -1;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* Get address decoding nodes from the FDT blob */
	do {
		node = fdt_node_offset_by_compatible(blob, node,
						     "marvell,mvebu-pxa3xx-nand");
		if (node < 0)
			break;

		/* Bypass disabled nodes */
		if (!fdtdec_get_is_enabled(blob, node))
			continue;

		/* Get the first enabled NAND controller base address */
		info->mmio_base =
			(void __iomem *)fdtdec_get_addr_size_auto_noparent(
					blob, node, "reg", 0, NULL, true);

		pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
		if (pdata->num_cs != 1) {
			pr_err("pxa3xx driver supports single CS only\n");
			break;
		}

		if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
			pdata->enable_arbiter = 1;

		if (fdtdec_get_bool(blob, node, "nand-keep-config"))
			pdata->keep_config = 1;

		/*
		 * ECC parameters.
		 * If these are not set, they will be selected according
		 * to the detected flash type.
		 */
		/* ECC strength */
		pdata->ecc_strength = fdtdec_get_int(blob, node,
						     "nand-ecc-strength", 0);

		/* ECC step size */
		pdata->ecc_step_size = fdtdec_get_int(blob, node,
						      "nand-ecc-step-size", 0);

		info->pdata = pdata;

		/* Currently support only a single NAND controller */
		return 0;

	} while (node >= 0);

	return -EINVAL;
}

static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	int ret, cs, probe_success;

	ret = pxa3xx_nand_probe_dt(info);
	if (ret)
		return ret;

	pdata = info->pdata;

	ret = alloc_nand_resource(info);
	if (ret) {
		dev_err(&info->pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in the 'mtdparts' kernel
		 * parameter. This name cannot be changed, otherwise the
		 * user's mtd partition configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_info(&info->pdev->dev,
				 "failed to scan nand at cs %d\n", cs);
			continue;
		}

		if (nand_register(cs, mtd))
			continue;

		probe_success = 1;
	}

	if (!probe_success)
		return -ENODEV;

	return 0;
}

/*
 * Main initialization routine
 */
void board_nand_init(void)
{
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	int ret;

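	/*
	 * A single allocation holds the controller info plus one host
	 * structure per possible chip select; alloc_nand_resource() later
	 * carves the per-CS hosts out of the area behind *info.
	 */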
	info = kzalloc(sizeof(*info) +
		       sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
		       GFP_KERNEL);
	if (!info)
		return;

	ret = pxa3xx_nand_probe(info);
	if (ret)
		return;
}