// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 */

#include <common.h>
#include <malloc.h>
#include <fdtdec.h>
#include <nand.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/types.h>

#include "pxa3xx_nand.h"

DECLARE_GLOBAL_DATA_PTR;
23
Stefan Roese75659da2015-07-23 10:26:16 +020024#define TIMEOUT_DRAIN_FIFO 5 /* in ms */
25#define CHIP_DELAY_TIMEOUT 200
26#define NAND_STOP_DELAY 40
27#define PAGE_CHUNK_SIZE (2048)
28
29/*
30 * Define a buffer size for the initial command that detects the flash device:
Ofer Heifetzfdf5b232018-08-29 11:56:00 +030031 * STATUS, READID and PARAM.
32 * ONFI param page is 256 bytes, and there are three redundant copies
33 * to be read. JEDEC param page is 512 bytes, and there are also three
34 * redundant copies to be read.
35 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
Stefan Roese75659da2015-07-23 10:26:16 +020036 */
Ofer Heifetzfdf5b232018-08-29 11:56:00 +030037#define INIT_BUFFER_SIZE 2048
Stefan Roese75659da2015-07-23 10:26:16 +020038
39/* registers and bit definitions */
40#define NDCR (0x00) /* Control register */
41#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
42#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
43#define NDSR (0x14) /* Status Register */
44#define NDPCR (0x18) /* Page Count Register */
45#define NDBDR0 (0x1C) /* Bad Block Register 0 */
46#define NDBDR1 (0x20) /* Bad Block Register 1 */
47#define NDECCCTRL (0x28) /* ECC control */
48#define NDDB (0x40) /* Data Buffer */
49#define NDCB0 (0x48) /* Command Buffer0 */
50#define NDCB1 (0x4C) /* Command Buffer1 */
51#define NDCB2 (0x50) /* Command Buffer2 */
52
53#define NDCR_SPARE_EN (0x1 << 31)
54#define NDCR_ECC_EN (0x1 << 30)
55#define NDCR_DMA_EN (0x1 << 29)
56#define NDCR_ND_RUN (0x1 << 28)
57#define NDCR_DWIDTH_C (0x1 << 27)
58#define NDCR_DWIDTH_M (0x1 << 26)
59#define NDCR_PAGE_SZ (0x1 << 24)
60#define NDCR_NCSX (0x1 << 23)
61#define NDCR_ND_MODE (0x3 << 21)
62#define NDCR_NAND_MODE (0x0)
63#define NDCR_CLR_PG_CNT (0x1 << 20)
Ofer Heifetz531816e2018-08-29 11:56:07 +030064#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
65#define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
Stefan Roese75659da2015-07-23 10:26:16 +020066#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
67#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
68
69#define NDCR_RA_START (0x1 << 15)
70#define NDCR_PG_PER_BLK (0x1 << 14)
71#define NDCR_ND_ARB_EN (0x1 << 12)
72#define NDCR_INT_MASK (0xFFF)
73
74#define NDSR_MASK (0xfff)
75#define NDSR_ERR_CNT_OFF (16)
76#define NDSR_ERR_CNT_MASK (0x1f)
77#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
78#define NDSR_RDY (0x1 << 12)
79#define NDSR_FLASH_RDY (0x1 << 11)
80#define NDSR_CS0_PAGED (0x1 << 10)
81#define NDSR_CS1_PAGED (0x1 << 9)
82#define NDSR_CS0_CMDD (0x1 << 8)
83#define NDSR_CS1_CMDD (0x1 << 7)
84#define NDSR_CS0_BBD (0x1 << 6)
85#define NDSR_CS1_BBD (0x1 << 5)
86#define NDSR_UNCORERR (0x1 << 4)
87#define NDSR_CORERR (0x1 << 3)
88#define NDSR_WRDREQ (0x1 << 2)
89#define NDSR_RDDREQ (0x1 << 1)
90#define NDSR_WRCMDREQ (0x1)
91
92#define NDCB0_LEN_OVRD (0x1 << 28)
93#define NDCB0_ST_ROW_EN (0x1 << 26)
94#define NDCB0_AUTO_RS (0x1 << 25)
95#define NDCB0_CSEL (0x1 << 24)
96#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
97#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
98#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
99#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
100#define NDCB0_NC (0x1 << 20)
101#define NDCB0_DBC (0x1 << 19)
102#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
103#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
104#define NDCB0_CMD2_MASK (0xff << 8)
105#define NDCB0_CMD1_MASK (0xff)
106#define NDCB0_ADDR_CYC_SHIFT (16)
107
108#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
109#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
110#define EXT_CMD_TYPE_READ 4 /* Read */
111#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
112#define EXT_CMD_TYPE_FINAL 3 /* Final command */
113#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
114#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
115
Ofer Heifetz4a574aa2018-08-29 11:56:05 +0300116/*
117 * This should be large enough to read 'ONFI' and 'JEDEC'.
118 * Let's use 7 bytes, which is the maximum ID count supported
119 * by the controller (see NDCR_RD_ID_CNT_MASK).
120 */
121#define READ_ID_BYTES 7
122
Stefan Roese75659da2015-07-23 10:26:16 +0200123/* macros for registers read/write */
124#define nand_writel(info, off, val) \
125 writel((val), (info)->mmio_base + (off))
126
127#define nand_readl(info, off) \
128 readl((info)->mmio_base + (off))
129
130/* error code and state */
131enum {
132 ERR_NONE = 0,
133 ERR_DMABUSERR = -1,
134 ERR_SENDCMD = -2,
135 ERR_UNCORERR = -3,
136 ERR_BBERR = -4,
137 ERR_CORERR = -5,
138};
139
140enum {
141 STATE_IDLE = 0,
142 STATE_PREPARED,
143 STATE_CMD_HANDLE,
144 STATE_DMA_READING,
145 STATE_DMA_WRITING,
146 STATE_DMA_DONE,
147 STATE_PIO_READING,
148 STATE_PIO_WRITING,
149 STATE_CMD_DONE,
150 STATE_READY,
151};
152
153enum pxa3xx_nand_variant {
154 PXA3XX_NAND_VARIANT_PXA,
155 PXA3XX_NAND_VARIANT_ARMADA370,
156};
157
158struct pxa3xx_nand_host {
159 struct nand_chip chip;
Stefan Roese75659da2015-07-23 10:26:16 +0200160 void *info_data;
161
162 /* page size of attached chip */
163 int use_ecc;
164 int cs;
165
166 /* calculated from pxa3xx_nand_flash data */
167 unsigned int col_addr_cycles;
168 unsigned int row_addr_cycles;
Stefan Roese75659da2015-07-23 10:26:16 +0200169};
170
171struct pxa3xx_nand_info {
172 struct nand_hw_control controller;
173 struct pxa3xx_nand_platform_data *pdata;
174
175 struct clk *clk;
176 void __iomem *mmio_base;
177 unsigned long mmio_phys;
178 int cmd_complete, dev_ready;
179
180 unsigned int buf_start;
181 unsigned int buf_count;
182 unsigned int buf_size;
183 unsigned int data_buff_pos;
184 unsigned int oob_buff_pos;
185
186 unsigned char *data_buff;
187 unsigned char *oob_buff;
188
189 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
190 unsigned int state;
191
192 /*
193 * This driver supports NFCv1 (as found in PXA SoC)
194 * and NFCv2 (as found in Armada 370/XP SoC).
195 */
196 enum pxa3xx_nand_variant variant;
197
198 int cs;
199 int use_ecc; /* use HW ECC ? */
200 int ecc_bch; /* using BCH ECC? */
201 int use_spare; /* use spare ? */
202 int need_wait;
203
204 unsigned int data_size; /* data to be read from FIFO */
205 unsigned int chunk_size; /* split commands chunk size */
206 unsigned int oob_size;
207 unsigned int spare_size;
208 unsigned int ecc_size;
209 unsigned int ecc_err_cnt;
210 unsigned int max_bitflips;
211 int retcode;
212
213 /* cached register value */
214 uint32_t reg_ndcr;
215 uint32_t ndtr0cs0;
216 uint32_t ndtr1cs0;
217
218 /* generated NDCBx register values */
219 uint32_t ndcb0;
220 uint32_t ndcb1;
221 uint32_t ndcb2;
222 uint32_t ndcb3;
223};
224
225static struct pxa3xx_nand_timing timing[] = {
226 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
227 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
228 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
229 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
230};
231
232static struct pxa3xx_nand_flash builtin_flash_types[] = {
233 { 0x46ec, 16, 16, &timing[1] },
234 { 0xdaec, 8, 8, &timing[1] },
235 { 0xd7ec, 8, 8, &timing[1] },
236 { 0xa12c, 8, 8, &timing[2] },
237 { 0xb12c, 16, 16, &timing[2] },
238 { 0xdc2c, 8, 8, &timing[2] },
239 { 0xcc2c, 16, 16, &timing[2] },
240 { 0xba20, 16, 16, &timing[3] },
241};
242
Sean Nyekjaera12a8e82017-11-22 13:39:08 +0100243#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
Stefan Roese75659da2015-07-23 10:26:16 +0200244static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
245static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
246
247static struct nand_bbt_descr bbt_main_descr = {
248 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
249 | NAND_BBT_2BIT | NAND_BBT_VERSION,
250 .offs = 8,
251 .len = 6,
252 .veroffs = 14,
253 .maxblocks = 8, /* Last 8 blocks in each chip */
254 .pattern = bbt_pattern
255};
256
257static struct nand_bbt_descr bbt_mirror_descr = {
258 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
259 | NAND_BBT_2BIT | NAND_BBT_VERSION,
260 .offs = 8,
261 .len = 6,
262 .veroffs = 14,
263 .maxblocks = 8, /* Last 8 blocks in each chip */
264 .pattern = bbt_mirror_pattern
265};
Sean Nyekjaera12a8e82017-11-22 13:39:08 +0100266#endif
Stefan Roese75659da2015-07-23 10:26:16 +0200267
268static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
269 .eccbytes = 32,
270 .eccpos = {
271 32, 33, 34, 35, 36, 37, 38, 39,
272 40, 41, 42, 43, 44, 45, 46, 47,
273 48, 49, 50, 51, 52, 53, 54, 55,
274 56, 57, 58, 59, 60, 61, 62, 63},
275 .oobfree = { {2, 30} }
276};
277
278static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
279 .eccbytes = 64,
280 .eccpos = {
281 32, 33, 34, 35, 36, 37, 38, 39,
282 40, 41, 42, 43, 44, 45, 46, 47,
283 48, 49, 50, 51, 52, 53, 54, 55,
284 56, 57, 58, 59, 60, 61, 62, 63,
285 96, 97, 98, 99, 100, 101, 102, 103,
286 104, 105, 106, 107, 108, 109, 110, 111,
287 112, 113, 114, 115, 116, 117, 118, 119,
288 120, 121, 122, 123, 124, 125, 126, 127},
289 /* Bootrom looks in bytes 0 & 5 for bad blocks */
290 .oobfree = { {6, 26}, { 64, 32} }
291};
292
293static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
294 .eccbytes = 128,
295 .eccpos = {
296 32, 33, 34, 35, 36, 37, 38, 39,
297 40, 41, 42, 43, 44, 45, 46, 47,
298 48, 49, 50, 51, 52, 53, 54, 55,
299 56, 57, 58, 59, 60, 61, 62, 63},
300 .oobfree = { }
301};
302
303#define NDTR0_tCH(c) (min((c), 7) << 19)
304#define NDTR0_tCS(c) (min((c), 7) << 16)
305#define NDTR0_tWH(c) (min((c), 7) << 11)
306#define NDTR0_tWP(c) (min((c), 7) << 8)
307#define NDTR0_tRH(c) (min((c), 7) << 3)
308#define NDTR0_tRP(c) (min((c), 7) << 0)
309
310#define NDTR1_tR(c) (min((c), 65535) << 16)
311#define NDTR1_tWHR(c) (min((c), 15) << 4)
312#define NDTR1_tAR(c) (min((c), 15) << 0)
313
314/* convert nano-seconds to nand flash controller clock cycles */
315#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
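/*
 * Illustrative conversion (assuming a 250 MHz NAND controller clock,
 * not a value taken from any specific board):
 *
 *	ns2cycle(10, 250000000) = (10 * (250000000 / 1000000)) / 1000
 *	                        = 2500 / 1000 = 2 cycles
 *
 * Note that the integer division truncates, so the result is rounded
 * down rather than up.
 */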
316
317static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
318{
319 /* We only support the Armada 370/XP/38x for now */
320 return PXA3XX_NAND_VARIANT_ARMADA370;
321}
322
323static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
324 const struct pxa3xx_nand_timing *t)
325{
326 struct pxa3xx_nand_info *info = host->info_data;
327 unsigned long nand_clk = mvebu_get_nand_clock();
328 uint32_t ndtr0, ndtr1;
329
330 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
331 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
332 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
333 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
334 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
335 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
336
337 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
338 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
339 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
340
341 info->ndtr0cs0 = ndtr0;
342 info->ndtr1cs0 = ndtr1;
343 nand_writel(info, NDTR0CS0, ndtr0);
344 nand_writel(info, NDTR1CS0, ndtr1);
345}
346
347static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
348 const struct nand_sdr_timings *t)
349{
350 struct pxa3xx_nand_info *info = host->info_data;
351 struct nand_chip *chip = &host->chip;
352 unsigned long nand_clk = mvebu_get_nand_clock();
353 uint32_t ndtr0, ndtr1;
354
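	/*
	 * The nand_sdr_timings values below are given in picoseconds,
	 * hence the division by 1000 to get nanoseconds for ns2cycle().
	 */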
355 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
356 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
357 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
Ofer Heifetz8f8d4582018-08-29 11:56:02 +0300358 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
Stefan Roese75659da2015-07-23 10:26:16 +0200359 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
Ofer Heifetz8f8d4582018-08-29 11:56:02 +0300360 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
Stefan Roese75659da2015-07-23 10:26:16 +0200361 u32 tR = chip->chip_delay * 1000;
362 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
363 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
364
365 /* fallback to a default value if tR = 0 */
366 if (!tR)
367 tR = 20000;
368
369 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
370 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
371 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
372 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
373 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
374 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
375
376 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
377 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
378 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
379
380 info->ndtr0cs0 = ndtr0;
381 info->ndtr1cs0 = ndtr1;
382 nand_writel(info, NDTR0CS0, ndtr0);
383 nand_writel(info, NDTR1CS0, ndtr1);
384}
385
386static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
387{
388 const struct nand_sdr_timings *timings;
389 struct nand_chip *chip = &host->chip;
390 struct pxa3xx_nand_info *info = host->info_data;
391 const struct pxa3xx_nand_flash *f = NULL;
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300392 struct mtd_info *mtd = nand_to_mtd(&host->chip);
Stefan Roese75659da2015-07-23 10:26:16 +0200393 int mode, id, ntypes, i;
394
395 mode = onfi_get_async_timing_mode(chip);
396 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
397 ntypes = ARRAY_SIZE(builtin_flash_types);
398
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300399 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
Stefan Roese75659da2015-07-23 10:26:16 +0200400
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300401 id = chip->read_byte(mtd);
402 id |= chip->read_byte(mtd) << 0x8;
Stefan Roese75659da2015-07-23 10:26:16 +0200403
404 for (i = 0; i < ntypes; i++) {
405 f = &builtin_flash_types[i];
406
407 if (f->chip_id == id)
408 break;
409 }
410
411 if (i == ntypes) {
412 dev_err(&info->pdev->dev, "Error: timings not found\n");
413 return -EINVAL;
414 }
415
416 pxa3xx_nand_set_timing(host, f->timing);
417
418 if (f->flash_width == 16) {
419 info->reg_ndcr |= NDCR_DWIDTH_M;
420 chip->options |= NAND_BUSWIDTH_16;
421 }
422
423 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
424 } else {
425 mode = fls(mode) - 1;
426 if (mode < 0)
427 mode = 0;
428
429 timings = onfi_async_timing_mode_to_sdr_timings(mode);
430 if (IS_ERR(timings))
431 return PTR_ERR(timings);
432
433 pxa3xx_nand_set_sdr_timing(host, timings);
434 }
435
436 return 0;
437}
438
439/*
440 * Set the data and OOB size, depending on the selected
441 * spare and ECC configuration.
442 * Only applicable to READ0, READOOB and PAGEPROG commands.
443 */
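/*
 * Example with the BCH-4 settings from pxa_ecc_init() (spare_size = 32,
 * ecc_size = 32) on a 2048-byte page: data_size = 2048 and oob_size = 32
 * when ECC is enabled, or 64 when it is disabled.
 */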
444static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
445 struct mtd_info *mtd)
446{
447 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
448
449 info->data_size = mtd->writesize;
450 if (!oob_enable)
451 return;
452
453 info->oob_size = info->spare_size;
454 if (!info->use_ecc)
455 info->oob_size += info->ecc_size;
456}
457
/*
 * NOTE: ND_RUN must be set first and the command buffer written
 * afterwards, otherwise the command does not start. We enable all
 * the interrupts at the same time and let pxa3xx_nand_irq() handle
 * all the logic.
 */
464static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
465{
466 uint32_t ndcr;
467
468 ndcr = info->reg_ndcr;
469
470 if (info->use_ecc) {
471 ndcr |= NDCR_ECC_EN;
472 if (info->ecc_bch)
473 nand_writel(info, NDECCCTRL, 0x1);
474 } else {
475 ndcr &= ~NDCR_ECC_EN;
476 if (info->ecc_bch)
477 nand_writel(info, NDECCCTRL, 0x0);
478 }
479
480 ndcr &= ~NDCR_DMA_EN;
481
482 if (info->use_spare)
483 ndcr |= NDCR_SPARE_EN;
484 else
485 ndcr &= ~NDCR_SPARE_EN;
486
487 ndcr |= NDCR_ND_RUN;
488
489 /* clear status bits and run */
Stefan Roese75659da2015-07-23 10:26:16 +0200490 nand_writel(info, NDSR, NDSR_MASK);
Ofer Heifetzd92d8992018-08-29 11:56:03 +0300491 nand_writel(info, NDCR, 0);
Stefan Roese75659da2015-07-23 10:26:16 +0200492 nand_writel(info, NDCR, ndcr);
493}
494
495static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
496{
497 uint32_t ndcr;
498
499 ndcr = nand_readl(info, NDCR);
500 nand_writel(info, NDCR, ndcr | int_mask);
501}
502
503static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
504{
505 if (info->ecc_bch) {
506 u32 ts;
507
		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read we have
		 * to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
516 while (len > 8) {
517 readsl(info->mmio_base + NDDB, data, 8);
518
519 ts = get_timer(0);
520 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
521 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
522 dev_err(&info->pdev->dev,
523 "Timeout on RDDREQ while draining the FIFO\n");
524 return;
525 }
526 }
527
528 data += 32;
529 len -= 8;
530 }
531 }
532
533 readsl(info->mmio_base + NDDB, data, len);
534}
535
536static void handle_data_pio(struct pxa3xx_nand_info *info)
537{
538 unsigned int do_bytes = min(info->data_size, info->chunk_size);
539
540 switch (info->state) {
541 case STATE_PIO_WRITING:
542 writesl(info->mmio_base + NDDB,
543 info->data_buff + info->data_buff_pos,
544 DIV_ROUND_UP(do_bytes, 4));
545
546 if (info->oob_size > 0)
547 writesl(info->mmio_base + NDDB,
548 info->oob_buff + info->oob_buff_pos,
549 DIV_ROUND_UP(info->oob_size, 4));
550 break;
551 case STATE_PIO_READING:
552 drain_fifo(info,
553 info->data_buff + info->data_buff_pos,
554 DIV_ROUND_UP(do_bytes, 4));
555
556 if (info->oob_size > 0)
557 drain_fifo(info,
558 info->oob_buff + info->oob_buff_pos,
559 DIV_ROUND_UP(info->oob_size, 4));
560 break;
561 default:
562 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
563 info->state);
564 BUG();
565 }
566
567 /* Update buffer pointers for multi-page read/write */
568 info->data_buff_pos += do_bytes;
569 info->oob_buff_pos += info->oob_size;
570 info->data_size -= do_bytes;
571}
572
573static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
574{
575 handle_data_pio(info);
576
577 info->state = STATE_CMD_DONE;
578 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
579}
580
581static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
582{
583 unsigned int status, is_completed = 0, is_ready = 0;
584 unsigned int ready, cmd_done;
585 irqreturn_t ret = IRQ_HANDLED;
586
587 if (info->cs == 0) {
588 ready = NDSR_FLASH_RDY;
589 cmd_done = NDSR_CS0_CMDD;
590 } else {
591 ready = NDSR_RDY;
592 cmd_done = NDSR_CS1_CMDD;
593 }
594
595 status = nand_readl(info, NDSR);
596
597 if (status & NDSR_UNCORERR)
598 info->retcode = ERR_UNCORERR;
599 if (status & NDSR_CORERR) {
600 info->retcode = ERR_CORERR;
601 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
602 info->ecc_bch)
603 info->ecc_err_cnt = NDSR_ERR_CNT(status);
604 else
605 info->ecc_err_cnt = 1;
606
		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
612 info->max_bitflips = max_t(unsigned int,
613 info->max_bitflips,
614 info->ecc_err_cnt);
615 }
616 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
617 info->state = (status & NDSR_RDDREQ) ?
618 STATE_PIO_READING : STATE_PIO_WRITING;
619 /* Call the IRQ thread in U-Boot directly */
620 pxa3xx_nand_irq_thread(info);
621 return 0;
622 }
623 if (status & cmd_done) {
624 info->state = STATE_CMD_DONE;
625 is_completed = 1;
626 }
627 if (status & ready) {
628 info->state = STATE_READY;
629 is_ready = 1;
630 }
631
	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and deserves a new
	 * interrupt of its own. This also lets the controller exit the IRQ.
	 */
637 nand_writel(info, NDSR, status);
638
Stefan Roese75659da2015-07-23 10:26:16 +0200639 if (status & NDSR_WRCMDREQ) {
Stefan Roese75659da2015-07-23 10:26:16 +0200640 status &= ~NDSR_WRCMDREQ;
641 info->state = STATE_CMD_HANDLE;
642
		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes
		 * directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
651 nand_writel(info, NDCB0, info->ndcb0);
652 nand_writel(info, NDCB0, info->ndcb1);
653 nand_writel(info, NDCB0, info->ndcb2);
654
655 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
656 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
657 nand_writel(info, NDCB0, info->ndcb3);
658 }
659
Stefan Roese75659da2015-07-23 10:26:16 +0200660 if (is_completed)
661 info->cmd_complete = 1;
662 if (is_ready)
663 info->dev_ready = 1;
664
665 return ret;
666}
667
668static inline int is_buf_blank(uint8_t *buf, size_t len)
669{
670 for (; len > 0; len--)
671 if (*buf++ != 0xff)
672 return 0;
673 return 1;
674}
675
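/*
 * Worked example for set_command_address() below, assuming a large-page
 * (2 KiB) device, column 0 and page_addr 0x12345:
 *
 *	ndcb1 = (0x2345 << 16) | 0x0000 = 0x23450000
 *	ndcb2 = (0x12345 & 0xFF0000) >> 16 = 0x01
 */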
676static void set_command_address(struct pxa3xx_nand_info *info,
677 unsigned int page_size, uint16_t column, int page_addr)
678{
679 /* small page addr setting */
680 if (page_size < PAGE_CHUNK_SIZE) {
681 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
682 | (column & 0xFF);
683
684 info->ndcb2 = 0;
685 } else {
686 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
687 | (column & 0xFFFF);
688
689 if (page_addr & 0xFF0000)
690 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
691 else
692 info->ndcb2 = 0;
693 }
694}
695
696static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
697{
698 struct pxa3xx_nand_host *host = info->host[info->cs];
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300699 struct mtd_info *mtd = nand_to_mtd(&host->chip);
Stefan Roese75659da2015-07-23 10:26:16 +0200700
	/* Reset the data and OOB column pointers used to handle data */
702 info->buf_start = 0;
703 info->buf_count = 0;
704 info->oob_size = 0;
705 info->data_buff_pos = 0;
706 info->oob_buff_pos = 0;
707 info->use_ecc = 0;
708 info->use_spare = 1;
709 info->retcode = ERR_NONE;
710 info->ecc_err_cnt = 0;
711 info->ndcb3 = 0;
712 info->need_wait = 0;
713
714 switch (command) {
715 case NAND_CMD_READ0:
716 case NAND_CMD_PAGEPROG:
717 info->use_ecc = 1;
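		/* fall through */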
718 case NAND_CMD_READOOB:
719 pxa3xx_set_datasize(info, mtd);
720 break;
721 case NAND_CMD_PARAM:
722 info->use_spare = 0;
723 break;
724 default:
725 info->ndcb1 = 0;
726 info->ndcb2 = 0;
727 break;
728 }
729
730 /*
731 * If we are about to issue a read command, or about to set
732 * the write address, then clean the data buffer.
733 */
734 if (command == NAND_CMD_READ0 ||
735 command == NAND_CMD_READOOB ||
736 command == NAND_CMD_SEQIN) {
737 info->buf_count = mtd->writesize + mtd->oobsize;
738 memset(info->data_buff, 0xFF, info->buf_count);
739 }
740}
741
742static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
743 int ext_cmd_type, uint16_t column, int page_addr)
744{
745 int addr_cycle, exec_cmd;
746 struct pxa3xx_nand_host *host;
747 struct mtd_info *mtd;
748
749 host = info->host[info->cs];
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300750 mtd = nand_to_mtd(&host->chip);
Stefan Roese75659da2015-07-23 10:26:16 +0200751 addr_cycle = 0;
752 exec_cmd = 1;
753
754 if (info->cs != 0)
755 info->ndcb0 = NDCB0_CSEL;
756 else
757 info->ndcb0 = 0;
758
759 if (command == NAND_CMD_SEQIN)
760 exec_cmd = 0;
761
762 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
763 + host->col_addr_cycles);
764
765 switch (command) {
766 case NAND_CMD_READOOB:
767 case NAND_CMD_READ0:
768 info->buf_start = column;
769 info->ndcb0 |= NDCB0_CMD_TYPE(0)
770 | addr_cycle
771 | NAND_CMD_READ0;
772
773 if (command == NAND_CMD_READOOB)
774 info->buf_start += mtd->writesize;
775
776 /*
777 * Multiple page read needs an 'extended command type' field,
778 * which is either naked-read or last-read according to the
779 * state.
780 */
781 if (mtd->writesize == PAGE_CHUNK_SIZE) {
782 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
783 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
784 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
785 | NDCB0_LEN_OVRD
786 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
787 info->ndcb3 = info->chunk_size +
788 info->oob_size;
789 }
790
791 set_command_address(info, mtd->writesize, column, page_addr);
792 break;
793
794 case NAND_CMD_SEQIN:
795
796 info->buf_start = column;
797 set_command_address(info, mtd->writesize, 0, page_addr);
798
799 /*
800 * Multiple page programming needs to execute the initial
801 * SEQIN command that sets the page address.
802 */
803 if (mtd->writesize > PAGE_CHUNK_SIZE) {
804 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
805 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
806 | addr_cycle
807 | command;
808 /* No data transfer in this case */
809 info->data_size = 0;
810 exec_cmd = 1;
811 }
812 break;
813
814 case NAND_CMD_PAGEPROG:
815 if (is_buf_blank(info->data_buff,
816 (mtd->writesize + mtd->oobsize))) {
817 exec_cmd = 0;
818 break;
819 }
820
821 /* Second command setting for large pages */
822 if (mtd->writesize > PAGE_CHUNK_SIZE) {
823 /*
824 * Multiple page write uses the 'extended command'
825 * field. This can be used to issue a command dispatch
826 * or a naked-write depending on the current stage.
827 */
828 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
829 | NDCB0_LEN_OVRD
830 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
831 info->ndcb3 = info->chunk_size +
832 info->oob_size;
833
834 /*
835 * This is the command dispatch that completes a chunked
836 * page program operation.
837 */
838 if (info->data_size == 0) {
839 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
840 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
841 | command;
842 info->ndcb1 = 0;
843 info->ndcb2 = 0;
844 info->ndcb3 = 0;
845 }
846 } else {
847 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
848 | NDCB0_AUTO_RS
849 | NDCB0_ST_ROW_EN
850 | NDCB0_DBC
851 | (NAND_CMD_PAGEPROG << 8)
852 | NAND_CMD_SEQIN
853 | addr_cycle;
854 }
855 break;
856
857 case NAND_CMD_PARAM:
Ofer Heifetzfdf5b232018-08-29 11:56:00 +0300858 info->buf_count = INIT_BUFFER_SIZE;
Stefan Roese75659da2015-07-23 10:26:16 +0200859 info->ndcb0 |= NDCB0_CMD_TYPE(0)
860 | NDCB0_ADDR_CYC(1)
861 | NDCB0_LEN_OVRD
862 | command;
863 info->ndcb1 = (column & 0xFF);
Ofer Heifetzfdf5b232018-08-29 11:56:00 +0300864 info->ndcb3 = INIT_BUFFER_SIZE;
865 info->data_size = INIT_BUFFER_SIZE;
Stefan Roese75659da2015-07-23 10:26:16 +0200866 break;
867
868 case NAND_CMD_READID:
Ofer Heifetz4a574aa2018-08-29 11:56:05 +0300869 info->buf_count = READ_ID_BYTES;
Stefan Roese75659da2015-07-23 10:26:16 +0200870 info->ndcb0 |= NDCB0_CMD_TYPE(3)
871 | NDCB0_ADDR_CYC(1)
872 | command;
873 info->ndcb1 = (column & 0xFF);
874
875 info->data_size = 8;
876 break;
877 case NAND_CMD_STATUS:
878 info->buf_count = 1;
879 info->ndcb0 |= NDCB0_CMD_TYPE(4)
880 | NDCB0_ADDR_CYC(1)
881 | command;
882
883 info->data_size = 8;
884 break;
885
886 case NAND_CMD_ERASE1:
887 info->ndcb0 |= NDCB0_CMD_TYPE(2)
888 | NDCB0_AUTO_RS
889 | NDCB0_ADDR_CYC(3)
890 | NDCB0_DBC
891 | (NAND_CMD_ERASE2 << 8)
892 | NAND_CMD_ERASE1;
893 info->ndcb1 = page_addr;
894 info->ndcb2 = 0;
895
896 break;
897 case NAND_CMD_RESET:
898 info->ndcb0 |= NDCB0_CMD_TYPE(5)
899 | command;
900
901 break;
902
903 case NAND_CMD_ERASE2:
904 exec_cmd = 0;
905 break;
906
907 default:
908 exec_cmd = 0;
909 dev_err(&info->pdev->dev, "non-supported command %x\n",
910 command);
911 break;
912 }
913
914 return exec_cmd;
915}
916
917static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
918 int column, int page_addr)
919{
Scott Wood17fed142016-05-30 13:57:56 -0500920 struct nand_chip *chip = mtd_to_nand(mtd);
921 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +0200922 struct pxa3xx_nand_info *info = host->info_data;
923 int exec_cmd;
924
	/*
	 * If this is an x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
930 if (info->reg_ndcr & NDCR_DWIDTH_M)
931 column /= 2;
932
	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if
	 * so, reload the timing registers.
	 */
938 if (info->cs != host->cs) {
939 info->cs = host->cs;
940 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
941 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
942 }
943
944 prepare_start_command(info, command);
945
946 info->state = STATE_PREPARED;
947 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
948
949 if (exec_cmd) {
950 u32 ts;
951
952 info->cmd_complete = 0;
953 info->dev_ready = 0;
954 info->need_wait = 1;
955 pxa3xx_nand_start(info);
956
957 ts = get_timer(0);
958 while (1) {
959 u32 status;
960
961 status = nand_readl(info, NDSR);
962 if (status)
963 pxa3xx_nand_irq(info);
964
965 if (info->cmd_complete)
966 break;
967
968 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
969 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
970 return;
971 }
972 }
973 }
974 info->state = STATE_IDLE;
975}
976
977static void nand_cmdfunc_extended(struct mtd_info *mtd,
978 const unsigned command,
979 int column, int page_addr)
980{
Scott Wood17fed142016-05-30 13:57:56 -0500981 struct nand_chip *chip = mtd_to_nand(mtd);
982 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +0200983 struct pxa3xx_nand_info *info = host->info_data;
984 int exec_cmd, ext_cmd_type;
985
	/*
	 * If this is an x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device.
	 */
991 if (info->reg_ndcr & NDCR_DWIDTH_M)
992 column /= 2;
993
	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if
	 * so, reload the timing registers.
	 */
999 if (info->cs != host->cs) {
1000 info->cs = host->cs;
1001 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1002 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1003 }
1004
1005 /* Select the extended command for the first command */
1006 switch (command) {
1007 case NAND_CMD_READ0:
1008 case NAND_CMD_READOOB:
1009 ext_cmd_type = EXT_CMD_TYPE_MONO;
1010 break;
1011 case NAND_CMD_SEQIN:
1012 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1013 break;
1014 case NAND_CMD_PAGEPROG:
1015 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1016 break;
1017 default:
1018 ext_cmd_type = 0;
1019 break;
1020 }
1021
1022 prepare_start_command(info, command);
1023
	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed, the
	 * completion is marked as done right away, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is exec_cmd.
	 */
1032 info->need_wait = 1;
1033 info->dev_ready = 0;
1034
1035 do {
1036 u32 ts;
1037
1038 info->state = STATE_PREPARED;
1039 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1040 column, page_addr);
1041 if (!exec_cmd) {
1042 info->need_wait = 0;
1043 info->dev_ready = 1;
1044 break;
1045 }
1046
1047 info->cmd_complete = 0;
1048 pxa3xx_nand_start(info);
1049
1050 ts = get_timer(0);
1051 while (1) {
1052 u32 status;
1053
1054 status = nand_readl(info, NDSR);
1055 if (status)
1056 pxa3xx_nand_irq(info);
1057
1058 if (info->cmd_complete)
1059 break;
1060
1061 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1062 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1063 return;
1064 }
1065 }
1066
1067 /* Check if the sequence is complete */
1068 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1069 break;
1070
		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
1075 if (info->data_size == 0 &&
1076 command == NAND_CMD_PAGEPROG &&
1077 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1078 break;
1079
1080 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1081 /* Last read: issue a 'last naked read' */
1082 if (info->data_size == info->chunk_size)
1083 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1084 else
1085 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1086
		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
1091 } else if (command == NAND_CMD_PAGEPROG &&
1092 info->data_size == 0) {
1093 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1094 }
1095 } while (1);
1096
1097 info->state = STATE_IDLE;
1098}
1099
1100static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
Scott Wood46e13102016-05-30 13:57:57 -05001101 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1102 int page)
Stefan Roese75659da2015-07-23 10:26:16 +02001103{
1104 chip->write_buf(mtd, buf, mtd->writesize);
1105 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1106
1107 return 0;
1108}
1109
1110static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1111 struct nand_chip *chip, uint8_t *buf, int oob_required,
1112 int page)
1113{
Scott Wood17fed142016-05-30 13:57:56 -05001114 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001115 struct pxa3xx_nand_info *info = host->info_data;
1116
1117 chip->read_buf(mtd, buf, mtd->writesize);
1118 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1119
1120 if (info->retcode == ERR_CORERR && info->use_ecc) {
1121 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1122
1123 } else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff), the HW will calculate its
		 * ECC as 0, which differs from the ECC information within
		 * the OOB; ignore such uncorrectable errors.
		 */
1129 if (is_buf_blank(buf, mtd->writesize))
1130 info->retcode = ERR_NONE;
1131 else
1132 mtd->ecc_stats.failed++;
1133 }
1134
1135 return info->max_bitflips;
1136}
1137
1138static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1139{
Scott Wood17fed142016-05-30 13:57:56 -05001140 struct nand_chip *chip = mtd_to_nand(mtd);
1141 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001142 struct pxa3xx_nand_info *info = host->info_data;
1143 char retval = 0xFF;
1144
1145 if (info->buf_start < info->buf_count)
		/* Has a new command just been sent? */
1147 retval = info->data_buff[info->buf_start++];
1148
1149 return retval;
1150}
1151
1152static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1153{
Scott Wood17fed142016-05-30 13:57:56 -05001154 struct nand_chip *chip = mtd_to_nand(mtd);
1155 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001156 struct pxa3xx_nand_info *info = host->info_data;
1157 u16 retval = 0xFFFF;
1158
1159 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1160 retval = *((u16 *)(info->data_buff+info->buf_start));
1161 info->buf_start += 2;
1162 }
1163 return retval;
1164}
1165
1166static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1167{
Scott Wood17fed142016-05-30 13:57:56 -05001168 struct nand_chip *chip = mtd_to_nand(mtd);
1169 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001170 struct pxa3xx_nand_info *info = host->info_data;
1171 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1172
1173 memcpy(buf, info->data_buff + info->buf_start, real_len);
1174 info->buf_start += real_len;
1175}
1176
1177static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1178 const uint8_t *buf, int len)
1179{
Scott Wood17fed142016-05-30 13:57:56 -05001180 struct nand_chip *chip = mtd_to_nand(mtd);
1181 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001182 struct pxa3xx_nand_info *info = host->info_data;
1183 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1184
1185 memcpy(info->data_buff + info->buf_start, buf, real_len);
1186 info->buf_start += real_len;
1187}
1188
1189static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1190{
1191 return;
1192}
1193
1194static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1195{
Scott Wood17fed142016-05-30 13:57:56 -05001196 struct nand_chip *chip = mtd_to_nand(mtd);
1197 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001198 struct pxa3xx_nand_info *info = host->info_data;
1199
1200 if (info->need_wait) {
1201 u32 ts;
1202
1203 info->need_wait = 0;
1204
1205 ts = get_timer(0);
1206 while (1) {
1207 u32 status;
1208
1209 status = nand_readl(info, NDSR);
1210 if (status)
1211 pxa3xx_nand_irq(info);
1212
1213 if (info->dev_ready)
1214 break;
1215
1216 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1217 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1218 return NAND_STATUS_FAIL;
1219 }
1220 }
1221 }
1222
1223 /* pxa3xx_nand_send_command has waited for command complete */
1224 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1225 if (info->retcode == ERR_NONE)
1226 return 0;
1227 else
1228 return NAND_STATUS_FAIL;
1229 }
1230
1231 return NAND_STATUS_READY;
1232}
1233
Ofer Heifetz531816e2018-08-29 11:56:07 +03001234static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1235{
1236 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1237
1238 /* Configure default flash values */
1239 info->chunk_size = PAGE_CHUNK_SIZE;
1240 info->reg_ndcr = 0x0; /* enable all interrupts */
1241 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1242 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1243 info->reg_ndcr |= NDCR_SPARE_EN;
1244
1245 return 0;
1246}
1247
1248static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001249{
1250 struct pxa3xx_nand_host *host = info->host[info->cs];
Ofer Heifetz531816e2018-08-29 11:56:07 +03001251 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
Scott Wood17fed142016-05-30 13:57:56 -05001252 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roese75659da2015-07-23 10:26:16 +02001253
1254 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1255 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1256 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001257}
1258
Ofer Heifetz268979f2018-08-29 11:56:08 +03001259static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001260{
Ofer Heifetz531816e2018-08-29 11:56:07 +03001261 struct pxa3xx_nand_platform_data *pdata = info->pdata;
Stefan Roese75659da2015-07-23 10:26:16 +02001262 uint32_t ndcr = nand_readl(info, NDCR);
1263
Stefan Roese75659da2015-07-23 10:26:16 +02001264 /* Set an initial chunk size */
Ofer Heifetz4a574aa2018-08-29 11:56:05 +03001265 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
Ofer Heifetz531816e2018-08-29 11:56:07 +03001266 info->reg_ndcr = ndcr &
1267 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1268 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001269 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1270 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
Stefan Roese75659da2015-07-23 10:26:16 +02001271}
1272
1273static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1274{
1275 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1276 if (info->data_buff == NULL)
1277 return -ENOMEM;
1278 return 0;
1279}
1280
1281static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1282{
1283 struct pxa3xx_nand_info *info = host->info_data;
1284 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1285 struct mtd_info *mtd;
1286 struct nand_chip *chip;
1287 const struct nand_sdr_timings *timings;
1288 int ret;
1289
Ofer Heifetz0da35df2018-08-29 11:56:01 +03001290 mtd = nand_to_mtd(&info->host[info->cs]->chip);
Scott Wood17fed142016-05-30 13:57:56 -05001291 chip = mtd_to_nand(mtd);
Stefan Roese75659da2015-07-23 10:26:16 +02001292
1293 /* configure default flash values */
1294 info->reg_ndcr = 0x0; /* enable all interrupts */
1295 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
Ofer Heifetz4a574aa2018-08-29 11:56:05 +03001296 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
Stefan Roese75659da2015-07-23 10:26:16 +02001297 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1298
	/* Use the common timing as a first try */
1300 timings = onfi_async_timing_mode_to_sdr_timings(0);
1301 if (IS_ERR(timings))
1302 return PTR_ERR(timings);
1303
1304 pxa3xx_nand_set_sdr_timing(host, timings);
1305
1306 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1307 ret = chip->waitfunc(mtd, chip);
1308 if (ret & NAND_STATUS_FAIL)
1309 return -ENODEV;
1310
1311 return 0;
1312}
1313
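/*
 * Summary of the ECC configurations handled below (values as set in
 * each branch):
 *
 *  strength  step  page_size -> chunk_size  spare_size  ecc_size  ecc->strength
 *      1      512     2048        2048          40          24          1
 *      1      512      512         512           8           8          1
 *      4      512     2048        2048          32          32         16
 *      4      512     4096        2048          32          32         16
 *      8      512     4096        1024           0          32         16
 */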
1314static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1315 struct nand_ecc_ctrl *ecc,
1316 int strength, int ecc_stepsize, int page_size)
1317{
1318 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1319 info->chunk_size = 2048;
1320 info->spare_size = 40;
1321 info->ecc_size = 24;
1322 ecc->mode = NAND_ECC_HW;
1323 ecc->size = 512;
1324 ecc->strength = 1;
1325
1326 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1327 info->chunk_size = 512;
1328 info->spare_size = 8;
1329 info->ecc_size = 8;
1330 ecc->mode = NAND_ECC_HW;
1331 ecc->size = 512;
1332 ecc->strength = 1;
1333
1334 /*
1335 * Required ECC: 4-bit correction per 512 bytes
1336 * Select: 16-bit correction per 2048 bytes
1337 */
1338 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1339 info->ecc_bch = 1;
1340 info->chunk_size = 2048;
1341 info->spare_size = 32;
1342 info->ecc_size = 32;
1343 ecc->mode = NAND_ECC_HW;
1344 ecc->size = info->chunk_size;
1345 ecc->layout = &ecc_layout_2KB_bch4bit;
1346 ecc->strength = 16;
1347
1348 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1349 info->ecc_bch = 1;
1350 info->chunk_size = 2048;
1351 info->spare_size = 32;
1352 info->ecc_size = 32;
1353 ecc->mode = NAND_ECC_HW;
1354 ecc->size = info->chunk_size;
1355 ecc->layout = &ecc_layout_4KB_bch4bit;
1356 ecc->strength = 16;
1357
1358 /*
1359 * Required ECC: 8-bit correction per 512 bytes
1360 * Select: 16-bit correction per 1024 bytes
1361 */
1362 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1363 info->ecc_bch = 1;
1364 info->chunk_size = 1024;
1365 info->spare_size = 0;
1366 info->ecc_size = 32;
1367 ecc->mode = NAND_ECC_HW;
1368 ecc->size = info->chunk_size;
1369 ecc->layout = &ecc_layout_4KB_bch8bit;
1370 ecc->strength = 16;
1371 } else {
1372 dev_err(&info->pdev->dev,
1373 "ECC strength %d at page size %d is not supported\n",
1374 strength, page_size);
1375 return -ENODEV;
1376 }
1377
1378 return 0;
1379}
1380
1381static int pxa3xx_nand_scan(struct mtd_info *mtd)
1382{
Scott Wood17fed142016-05-30 13:57:56 -05001383 struct nand_chip *chip = mtd_to_nand(mtd);
1384 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001385 struct pxa3xx_nand_info *info = host->info_data;
1386 struct pxa3xx_nand_platform_data *pdata = info->pdata;
Stefan Roese75659da2015-07-23 10:26:16 +02001387 int ret;
1388 uint16_t ecc_strength, ecc_step;
1389
Ofer Heifetz268979f2018-08-29 11:56:08 +03001390 if (pdata->keep_config) {
1391 pxa3xx_nand_detect_config(info);
1392 } else {
1393 ret = pxa3xx_nand_config_ident(info);
1394 if (ret)
1395 return ret;
1396 ret = pxa3xx_nand_sensing(host);
1397 if (ret) {
1398 dev_info(&info->pdev->dev,
1399 "There is no chip on cs %d!\n",
1400 info->cs);
1401 return ret;
1402 }
Stefan Roese75659da2015-07-23 10:26:16 +02001403 }
1404
Stefan Roese75659da2015-07-23 10:26:16 +02001405 /* Device detection must be done with ECC disabled */
1406 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1407 nand_writel(info, NDECCCTRL, 0x0);
1408
1409 if (nand_scan_ident(mtd, 1, NULL))
1410 return -ENODEV;
1411
1412 if (!pdata->keep_config) {
1413 ret = pxa3xx_nand_init_timings(host);
1414 if (ret) {
1415 dev_err(&info->pdev->dev,
1416 "Failed to set timings: %d\n", ret);
1417 return ret;
1418 }
1419 }
1420
Stefan Roese75659da2015-07-23 10:26:16 +02001421#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1422 /*
1423 * We'll use a bad block table stored in-flash and don't
1424 * allow writing the bad block marker to the flash.
1425 */
1426 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1427 chip->bbt_td = &bbt_main_descr;
1428 chip->bbt_md = &bbt_mirror_descr;
1429#endif
1430
	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
1436 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1437 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1438 chip->cmdfunc = nand_cmdfunc_extended;
1439 } else {
1440 dev_err(&info->pdev->dev,
1441 "unsupported page size on this variant\n");
1442 return -ENODEV;
1443 }
1444 }
1445
1446 if (pdata->ecc_strength && pdata->ecc_step_size) {
1447 ecc_strength = pdata->ecc_strength;
1448 ecc_step = pdata->ecc_step_size;
1449 } else {
1450 ecc_strength = chip->ecc_strength_ds;
1451 ecc_step = chip->ecc_step_ds;
1452 }
1453
1454 /* Set default ECC strength requirements on non-ONFI devices */
1455 if (ecc_strength < 1 && ecc_step < 1) {
1456 ecc_strength = 1;
1457 ecc_step = 512;
1458 }
1459
1460 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1461 ecc_step, mtd->writesize);
1462 if (ret)
1463 return ret;
1464
1465 /* calculate addressing information */
1466 if (mtd->writesize >= 2048)
1467 host->col_addr_cycles = 2;
1468 else
1469 host->col_addr_cycles = 1;
1470
1471 /* release the initial buffer */
1472 kfree(info->data_buff);
1473
1474 /* allocate the real data + oob buffer */
1475 info->buf_size = mtd->writesize + mtd->oobsize;
1476 ret = pxa3xx_nand_init_buff(info);
1477 if (ret)
1478 return ret;
1479 info->oob_buff = info->data_buff + mtd->writesize;
1480
1481 if ((mtd->size >> chip->page_shift) > 65536)
1482 host->row_addr_cycles = 3;
1483 else
1484 host->row_addr_cycles = 2;
Ofer Heifetz531816e2018-08-29 11:56:07 +03001485
1486 if (!pdata->keep_config)
1487 pxa3xx_nand_config_tail(info);
1488
Stefan Roese75659da2015-07-23 10:26:16 +02001489 return nand_scan_tail(mtd);
1490}
1491
1492static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1493{
1494 struct pxa3xx_nand_platform_data *pdata;
1495 struct pxa3xx_nand_host *host;
1496 struct nand_chip *chip = NULL;
1497 struct mtd_info *mtd;
1498 int ret, cs;
1499
1500 pdata = info->pdata;
1501 if (pdata->num_cs <= 0)
1502 return -ENODEV;
1503
1504 info->variant = pxa3xx_nand_get_variant();
1505 for (cs = 0; cs < pdata->num_cs; cs++) {
Kevin Smith4d21b592016-01-14 16:01:38 +00001506 chip = (struct nand_chip *)
1507 ((u8 *)&info[1] + sizeof(*host) * cs);
Scott Wood17fed142016-05-30 13:57:56 -05001508 mtd = nand_to_mtd(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001509 host = (struct pxa3xx_nand_host *)chip;
1510 info->host[cs] = host;
Stefan Roese75659da2015-07-23 10:26:16 +02001511 host->cs = cs;
1512 host->info_data = info;
Stefan Roese75659da2015-07-23 10:26:16 +02001513 mtd->owner = THIS_MODULE;
1514
Chris Packham3c2170a2016-08-29 15:20:52 +12001515 nand_set_controller_data(chip, host);
Stefan Roese75659da2015-07-23 10:26:16 +02001516 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1517 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1518 chip->controller = &info->controller;
1519 chip->waitfunc = pxa3xx_nand_waitfunc;
1520 chip->select_chip = pxa3xx_nand_select_chip;
1521 chip->read_word = pxa3xx_nand_read_word;
1522 chip->read_byte = pxa3xx_nand_read_byte;
1523 chip->read_buf = pxa3xx_nand_read_buf;
1524 chip->write_buf = pxa3xx_nand_write_buf;
1525 chip->options |= NAND_NO_SUBPAGE_WRITE;
1526 chip->cmdfunc = nand_cmdfunc;
1527 }
1528
Stefan Roese75659da2015-07-23 10:26:16 +02001529 /* Allocate a buffer to allow flash detection */
1530 info->buf_size = INIT_BUFFER_SIZE;
1531 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1532 if (info->data_buff == NULL) {
1533 ret = -ENOMEM;
1534 goto fail_disable_clk;
1535 }
1536
1537 /* initialize all interrupts to be disabled */
1538 disable_int(info, NDSR_MASK);
1539
1540 return 0;
1541
1542 kfree(info->data_buff);
1543fail_disable_clk:
1544 return ret;
1545}
1546
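/*
 * Sketch of a device tree node as parsed below (the node name, register
 * address and property values are illustrative placeholders, not taken
 * from a real board):
 *
 *	nand-controller@d0000 {
 *		compatible = "marvell,mvebu-pxa3xx-nand";
 *		reg = <0xd0000 0x54>;
 *		num-cs = <1>;
 *		nand-enable-arbiter;
 *		nand-keep-config;
 *		nand-ecc-strength = <4>;
 *		nand-ecc-step-size = <512>;
 *	};
 */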
1547static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1548{
1549 struct pxa3xx_nand_platform_data *pdata;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001550 const void *blob = gd->fdt_blob;
1551 int node = -1;
Stefan Roese75659da2015-07-23 10:26:16 +02001552
1553 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1554 if (!pdata)
1555 return -ENOMEM;
1556
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001557 /* Get address decoding nodes from the FDT blob */
1558 do {
1559 node = fdt_node_offset_by_compatible(blob, node,
1560 "marvell,mvebu-pxa3xx-nand");
1561 if (node < 0)
1562 break;
1563
		/* Bypass disabled nodes */
1565 if (!fdtdec_get_is_enabled(blob, node))
1566 continue;
Stefan Roese75659da2015-07-23 10:26:16 +02001567
		/* Get the first enabled NAND controller base address */
1569 info->mmio_base =
1570 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1571 blob, node, "reg", 0, NULL, true);
Stefan Roese75659da2015-07-23 10:26:16 +02001572
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001573 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1574 if (pdata->num_cs != 1) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001575 pr_err("pxa3xx driver supports single CS only\n");
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001576 break;
1577 }
1578
1579 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1580 pdata->enable_arbiter = 1;
1581
1582 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1583 pdata->keep_config = 1;
1584
1585 /*
1586 * ECC parameters.
1587 * If these are not set, they will be selected according
1588 * to the detected flash type.
1589 */
1590 /* ECC strength */
1591 pdata->ecc_strength = fdtdec_get_int(blob, node,
1592 "nand-ecc-strength", 0);
1593
1594 /* ECC step size */
1595 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1596 "nand-ecc-step-size", 0);
1597
1598 info->pdata = pdata;
1599
1600 /* Currently support only a single NAND controller */
1601 return 0;
1602
1603 } while (node >= 0);
1604
1605 return -EINVAL;
Stefan Roese75659da2015-07-23 10:26:16 +02001606}
1607
1608static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1609{
1610 struct pxa3xx_nand_platform_data *pdata;
1611 int ret, cs, probe_success;
1612
1613 ret = pxa3xx_nand_probe_dt(info);
1614 if (ret)
1615 return ret;
1616
1617 pdata = info->pdata;
1618
1619 ret = alloc_nand_resource(info);
1620 if (ret) {
1621 dev_err(&pdev->dev, "alloc nand resource failed\n");
1622 return ret;
1623 }
1624
1625 probe_success = 0;
1626 for (cs = 0; cs < pdata->num_cs; cs++) {
Ofer Heifetz0da35df2018-08-29 11:56:01 +03001627 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001628
		/*
		 * The mtd name matches the one used in the 'mtdparts' kernel
		 * parameter. This name cannot be changed, otherwise the
		 * user's mtd partition configuration would break.
		 */
1634 mtd->name = "pxa3xx_nand-0";
1635 info->cs = cs;
1636 ret = pxa3xx_nand_scan(mtd);
1637 if (ret) {
1638 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1639 cs);
1640 continue;
1641 }
1642
Scott Wood2c1b7e12016-05-30 13:57:55 -05001643 if (nand_register(cs, mtd))
1644 continue;
1645
1646 probe_success = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001647 }
1648
1649 if (!probe_success)
1650 return -ENODEV;
1651
1652 return 0;
1653}
1654
1655/*
1656 * Main initialization routine
1657 */
1658void board_nand_init(void)
1659{
1660 struct pxa3xx_nand_info *info;
1661 struct pxa3xx_nand_host *host;
1662 int ret;
1663
Kevin Smithf6ca2a62016-01-14 16:01:39 +00001664 info = kzalloc(sizeof(*info) +
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001665 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1666 GFP_KERNEL);
Stefan Roese75659da2015-07-23 10:26:16 +02001667 if (!info)
1668 return;
1669
Stefan Roese75659da2015-07-23 10:26:16 +02001670 ret = pxa3xx_nand_probe(info);
1671 if (ret)
1672 return;
Stefan Roese75659da2015-07-23 10:26:16 +02001673}