// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 */

#include <common.h>
#include <malloc.h>
#include <fdtdec.h>
#include <nand.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/types.h>

#include "pxa3xx_nand.h"

DECLARE_GLOBAL_DATA_PTR;

#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define CHIP_DELAY_TIMEOUT	200
#define NAND_STOP_DELAY		40
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
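/*
 * Worked example (status value chosen purely for illustration): with BCH
 * enabled, NDSR_ERR_CNT() extracts bits [20:16], so a hypothetical status
 * of 0x000A0008 (NDSR_CORERR set, error-count field 0xA) gives
 * NDSR_ERR_CNT(0x000A0008) == 10 corrected bitflips for the current chunk.
 */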
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
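
/*
 * How nand_cmdfunc_extended() below uses these types for a chunked page
 * access (a summary derived from that function, not a datasheet quote):
 * the first chunk of a read is issued as EXT_CMD_TYPE_MONO, intermediate
 * chunks as EXT_CMD_TYPE_NAKED_RW and the final chunk as
 * EXT_CMD_TYPE_LAST_RW; a chunked program is finished with an
 * EXT_CMD_TYPE_DISPATCH command.
 */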

/* macros for registers read/write */
#define nand_writel(info, off, val) \
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off) \
	readl((info)->mmio_base + (off))
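
/*
 * Minimal usage sketch, mirroring what the driver does further down:
 * clear all status bits before kicking off a command, then poll NDSR:
 *
 *	nand_writel(info, NDSR, NDSR_MASK);
 *	status = nand_readl(info, NDSR);
 */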

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip chip;
	void *info_data;

	/* page size of attached chip */
	int use_ecc;
	int cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int col_addr_cycles;
	unsigned int row_addr_cycles;
	size_t read_id_bytes;

};

struct pxa3xx_nand_info {
	struct nand_hw_control controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk *clk;
	void __iomem *mmio_base;
	unsigned long mmio_phys;
	int cmd_complete, dev_ready;

	unsigned int buf_start;
	unsigned int buf_count;
	unsigned int buf_size;
	unsigned int data_buff_pos;
	unsigned int oob_buff_pos;

	unsigned char *data_buff;
	unsigned char *oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int cs;
	int use_ecc;	/* use HW ECC ? */
	int ecc_bch;	/* using BCH ECC? */
	int use_spare;	/* use spare ? */
	int need_wait;

	unsigned int data_size;	/* data to be read from FIFO */
	unsigned int chunk_size;	/* split commands chunk size */
	unsigned int oob_size;
	unsigned int spare_size;
	unsigned int ecc_size;
	unsigned int ecc_err_cnt;
	unsigned int max_bitflips;
	int retcode;

	/* cached register value */
	uint32_t reg_ndcr;
	uint32_t ndtr0cs0;
	uint32_t ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t ndcb0;
	uint32_t ndcb1;
	uint32_t ndcb2;
	uint32_t ndcb3;
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
	{ 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
	{ 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec, 8, 8, &timing[1] },
	{ 0xd7ec, 8, 8, &timing[1] },
	{ 0xa12c, 8, 8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c, 8, 8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};
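
/*
 * Note on chip_id matching (derived from pxa3xx_nand_init_timings() below):
 * the 16-bit id is assembled from the first two READID bytes as
 * id = byte0 | (byte1 << 8), so the low byte is the manufacturer ID and the
 * high byte the device ID; e.g. 0xdaec is expected to match a chip that
 * answers 0xec, 0xda to READID.
 */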
236
Sean Nyekjaera12a8e82017-11-22 13:39:08 +0100237#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
Stefan Roese75659da2015-07-23 10:26:16 +0200238static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
239static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
240
241static struct nand_bbt_descr bbt_main_descr = {
242 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
243 | NAND_BBT_2BIT | NAND_BBT_VERSION,
244 .offs = 8,
245 .len = 6,
246 .veroffs = 14,
247 .maxblocks = 8, /* Last 8 blocks in each chip */
248 .pattern = bbt_pattern
249};
250
251static struct nand_bbt_descr bbt_mirror_descr = {
252 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
253 | NAND_BBT_2BIT | NAND_BBT_VERSION,
254 .offs = 8,
255 .len = 6,
256 .veroffs = 14,
257 .maxblocks = 8, /* Last 8 blocks in each chip */
258 .pattern = bbt_mirror_pattern
259};
Sean Nyekjaera12a8e82017-11-22 13:39:08 +0100260#endif
Stefan Roese75659da2015-07-23 10:26:16 +0200261
262static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
263 .eccbytes = 32,
264 .eccpos = {
265 32, 33, 34, 35, 36, 37, 38, 39,
266 40, 41, 42, 43, 44, 45, 46, 47,
267 48, 49, 50, 51, 52, 53, 54, 55,
268 56, 57, 58, 59, 60, 61, 62, 63},
269 .oobfree = { {2, 30} }
270};
271
272static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
273 .eccbytes = 64,
274 .eccpos = {
275 32, 33, 34, 35, 36, 37, 38, 39,
276 40, 41, 42, 43, 44, 45, 46, 47,
277 48, 49, 50, 51, 52, 53, 54, 55,
278 56, 57, 58, 59, 60, 61, 62, 63,
279 96, 97, 98, 99, 100, 101, 102, 103,
280 104, 105, 106, 107, 108, 109, 110, 111,
281 112, 113, 114, 115, 116, 117, 118, 119,
282 120, 121, 122, 123, 124, 125, 126, 127},
283 /* Bootrom looks in bytes 0 & 5 for bad blocks */
284 .oobfree = { {6, 26}, { 64, 32} }
285};
286
287static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
288 .eccbytes = 128,
289 .eccpos = {
290 32, 33, 34, 35, 36, 37, 38, 39,
291 40, 41, 42, 43, 44, 45, 46, 47,
292 48, 49, 50, 51, 52, 53, 54, 55,
293 56, 57, 58, 59, 60, 61, 62, 63},
294 .oobfree = { }
295};
296
297#define NDTR0_tCH(c) (min((c), 7) << 19)
298#define NDTR0_tCS(c) (min((c), 7) << 16)
299#define NDTR0_tWH(c) (min((c), 7) << 11)
300#define NDTR0_tWP(c) (min((c), 7) << 8)
301#define NDTR0_tRH(c) (min((c), 7) << 3)
302#define NDTR0_tRP(c) (min((c), 7) << 0)
303
304#define NDTR1_tR(c) (min((c), 65535) << 16)
305#define NDTR1_tWHR(c) (min((c), 15) << 4)
306#define NDTR1_tAR(c) (min((c), 15) << 0)

/* convert nanoseconds to NAND flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

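/*
 * Worked example (clock rate assumed for illustration): with a 250 MHz
 * controller clock, clk / 1000000 = 250 cycles per microsecond, so
 * ns2cycle(25, 250000000) = 25 * 250 / 1000 = 6 cycles (the exact 6.25 is
 * truncated by the integer division).
 */
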
static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(mtd);
		id |= chip->read_byte(mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(&info->pdev->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}

/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = mtd->writesize;
	if (!oob_enable)
		return;

	info->oob_size = info->spare_size;
	if (!info->use_ecc)
		info->oob_size += info->ecc_size;
}
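
/*
 * Worked example (numbers taken from pxa_ecc_init() below, 4-bit BCH on a
 * 2 KiB page, so spare_size = 32 and ecc_size = 32): a READ0 with HW ECC
 * enabled ends up with data_size = 2048 and oob_size = 32, while the same
 * access with ECC disabled gets oob_size = 32 + 32 = 64.
 */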

/**
 * NOTE: ND_RUN must be set before the command buffer is written;
 * otherwise the command does not work.
 * We enable all the interrupts at the same time, and
 * let pxa3xx_nand_irq handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read we have
		 * to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}

static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}

static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column pointers to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->oob_size = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * If this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has been changed;
	 * if yes, reset the timing.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * If this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has been changed;
	 * if yes, reset the timing.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion is marked done immediately, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff), the HW will calculate its
		 * ECC as 0, which is different from the ECC information
		 * within the OOB; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just sent a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
				  const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	struct nand_chip *chip = mtd_to_nand(mtd);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;

	return 0;
}

static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	/*
	 * We hard-code chip select 0 here, since we don't support
	 * keep_config when there is more than one chip attached to
	 * the controller.
	 */
	struct pxa3xx_nand_host *host = info->host[0];
	uint32_t ndcr = nand_readl(info, NDCR);

	if (ndcr & NDCR_PAGE_SZ) {
		/* Controller's FIFO size */
		info->chunk_size = 2048;
		host->read_id_bytes = 4;
	} else {
		info->chunk_size = 512;
		host->read_id_bytes = 2;
	}

	/* Set an initial chunk size */
	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
	return 0;
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}

static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = nand_to_mtd(&info->host[info->cs]->chip);
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timing as a first try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}

static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}

static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	ret = pxa3xx_nand_sensing(host);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

KEEP_CONFIG:
	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(&info->pdev->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

	ret = pxa3xx_nand_config_flash(info);
	if (ret)
		return ret;

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}

static int alloc_nand_resource(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	int ret, cs;

	pdata = info->pdata;
	if (pdata->num_cs <= 0)
		return -ENODEV;

	info->variant = pxa3xx_nand_get_variant();
	for (cs = 0; cs < pdata->num_cs; cs++) {
		chip = (struct nand_chip *)
			((u8 *)&info[1] + sizeof(*host) * cs);
		mtd = nand_to_mtd(chip);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
		host->read_id_bytes = 4;
		mtd->owner = THIS_MODULE;

		nand_set_controller_data(chip, host);
		chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
		chip->controller = &info->controller;
		chip->waitfunc = pxa3xx_nand_waitfunc;
		chip->select_chip = pxa3xx_nand_select_chip;
		chip->read_word = pxa3xx_nand_read_word;
		chip->read_byte = pxa3xx_nand_read_byte;
		chip->read_buf = pxa3xx_nand_read_buf;
		chip->write_buf = pxa3xx_nand_write_buf;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc = nand_cmdfunc;
	}
	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	return 0;

	kfree(info->data_buff);
fail_disable_clk:
	return ret;
}

static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	const void *blob = gd->fdt_blob;
	int node = -1;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* Get address decoding nodes from the FDT blob */
	do {
		node = fdt_node_offset_by_compatible(blob, node,
						     "marvell,mvebu-pxa3xx-nand");
		if (node < 0)
			break;

		/* Bypass disabled nodes */
		if (!fdtdec_get_is_enabled(blob, node))
			continue;

		/* Get the first enabled NAND controller base address */
		info->mmio_base =
			(void __iomem *)fdtdec_get_addr_size_auto_noparent(
					blob, node, "reg", 0, NULL, true);

		pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
		if (pdata->num_cs != 1) {
			pr_err("pxa3xx driver supports single CS only\n");
			break;
		}

		if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
			pdata->enable_arbiter = 1;

		if (fdtdec_get_bool(blob, node, "nand-keep-config"))
			pdata->keep_config = 1;

		/*
		 * ECC parameters.
		 * If these are not set, they will be selected according
		 * to the detected flash type.
		 */
		/* ECC strength */
		pdata->ecc_strength = fdtdec_get_int(blob, node,
						     "nand-ecc-strength", 0);

		/* ECC step size */
		pdata->ecc_step_size = fdtdec_get_int(blob, node,
						      "nand-ecc-step-size", 0);

		info->pdata = pdata;

		/* Currently support only a single NAND controller */
		return 0;

	} while (node >= 0);

	return -EINVAL;
}

static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	int ret, cs, probe_success;

	ret = pxa3xx_nand_probe_dt(info);
	if (ret)
		return ret;

	pdata = info->pdata;

	ret = alloc_nand_resource(info);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		if (nand_register(cs, mtd))
			continue;

		probe_success = 1;
	}

	if (!probe_success)
		return -ENODEV;

	return 0;
}

/*
 * Main initialization routine
 */
void board_nand_init(void)
{
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	int ret;

	info = kzalloc(sizeof(*info) +
		       sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
		       GFP_KERNEL);
	if (!info)
		return;

	ret = pxa3xx_nand_probe(info);
	if (ret)
		return;
}